# from ShuffleNetV2 import shufflenetv2
import torch
import torchvision.transforms as transforms

from torch.autograd import Variable
from collections import OrderedDict
import time
from torchvision.datasets.folder import default_loader

# import dataset
from PIL import Image
import os

import sys
import string
from PIL import Image
import argparse
import torch
from torch import nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.utils.data
import math
import cv2
import numpy as np
import time
import copy
# import onnx
import onnxruntime
from collections import OrderedDict
# from onnx import helper
# from onnx import AttributeProto, TensorProto, GraphProto
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

def letterbox_image(image, size, fillValue=(128, 128, 128)):
    """Resize *image* to fit inside *size* with unchanged aspect ratio, padding the rest.

    Parameters
    ----------
    image : PIL.Image.Image
        Source image.
    size : tuple (w, h)
        Target canvas size in pixels.
    fillValue : sequence of 3 ints
        RGB pad color.  (Default changed from a mutable list literal to an
        equivalent tuple; the value is the same.)

    Returns
    -------
    (boxed_image, scale)
        The padded RGB PIL image of exactly *size*, and the effective resize
        ratio ``new_w / image_w`` applied to the original.
    """
    image_w, image_h = image.size
    w, h = size
    # Single scale factor so the whole image fits inside (w, h).
    ratio = min(w * 1.0 / image_w, h * 1.0 / image_h)
    new_w = int(image_w * ratio)
    new_h = int(image_h * ratio)

    resized_image = image.resize((new_w, new_h), Image.BICUBIC)
    boxed_image = Image.new('RGB', size, tuple(fillValue))

    # Center the resized image on the canvas.  BUGFIX: the original
    # hard-coded 224 here, which mis-placed (or cropped, via negative
    # offsets) the paste for any other target size such as (608, 608).
    box_w = (w - new_w) // 2
    box_h = (h - new_h) // 2
    boxed_image.paste(resized_image, (box_w, box_h))
    return boxed_image, new_w / image_w


def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works elementwise on numpy arrays."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)

def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert one raw detection-layer output into bounding-box parameters.

    feats is assumed to be a numpy array shaped
    (batch, grid_h, grid_w, num_anchors * (num_classes + 5)) — TODO confirm
    against the exporting model.  input_shape is the network input (h, w).

    Returns (box_xy, box_wh, box_confidence, box_class_probs) with xy/wh
    normalized to [0, 1], or (grid, feats, box_xy, box_wh) when calc_loss
    is True.
    """
    n_anchors = len(anchors)
    # Anchor (w, h) pairs broadcastable over (batch, gh, gw, anchor, 2).
    anchors_wh = np.reshape(anchors, [1, 1, 1, n_anchors, 2])

    grid_h, grid_w = np.shape(feats)[1:3]
    # Grid of per-cell (x, y) offsets, shaped (grid_h, grid_w, 1, 2).
    col_idx, row_idx = np.meshgrid(np.arange(grid_w), np.arange(grid_h))
    grid = np.stack([col_idx, row_idx], axis=-1).reshape(grid_h, grid_w, 1, 2)
    grid = grid.astype(feats.dtype)

    feats = np.reshape(
        feats, [-1, grid_h, grid_w, n_anchors, num_classes + 5])

    # Decode predictions: xy = sigmoid offset + cell index, divided by the
    # grid size; wh = exponential scale on the anchor, divided by the
    # network input size.  Both scale vectors are in (w, h) order.
    xy_scale = np.array([grid_w, grid_h], dtype=feats.dtype)
    wh_scale = np.array(input_shape[::-1], dtype=feats.dtype)
    box_xy = (sigmoid(feats[..., :2]) + grid) / xy_scale
    box_wh = np.exp(feats[..., 2:4]) * anchors_wh / wh_scale
    box_confidence = sigmoid(feats[..., 4:5])
    box_class_probs = sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs

def box_layer(inputs, anchors, num_classes):
    """Decode the three YOLO output layers into absolute boxes and scores.

    Parameters
    ----------
    inputs : sequence (y1, y2, y3, image_shape, input_shape)
        y1..y3 are the raw feature maps (coarse to fine); image_shape is the
        original image size and input_shape the network input size — both
        presumably (h, w), matching the [::-1] flips below; TODO confirm.
    anchors : np.ndarray, shape (9, 2)
        Anchor (w, h) pairs, selected per layer via the anchor mask.
    num_classes : int
        Number of object classes.

    Returns
    -------
    boxes : np.ndarray, shape (N, 4)
        (xmin, ymin, xmax, ymax) scaled to image pixels.
    scores : np.ndarray, shape (N, num_classes)
        Per-class confidence (objectness * class probability).
    """
    y1, y2, y3, image_shape, input_shape = inputs
    layer_outputs = [y1, y2, y3]

    # Layer 0 (coarsest grid) uses the largest anchors.
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    input_shape = np.array(input_shape, dtype=np.float32)
    image_shape = np.array(image_shape, dtype=np.float32)

    boxes = []
    scores = []
    for lay, feats in enumerate(layer_outputs):
        box_xy, box_wh, box_confidence, box_class_probs = yolo_head(
            feats, anchors[anchor_mask[lay]], num_classes, input_shape)

        # Per-box, per-class score = objectness * class probability.
        box_score = np.reshape(box_confidence * box_class_probs,
                               [-1, num_classes])

        # Convert center/size to corner coordinates.
        box_mins = box_xy - (box_wh / 2.)
        box_maxes = box_xy + (box_wh / 2.)
        box = np.concatenate([
            box_mins[..., 0:1],   # xmin
            box_mins[..., 1:2],   # ymin
            box_maxes[..., 0:1],  # xmax
            box_maxes[..., 1:2],  # ymax
        ], axis=-1)

        boxes.append(np.reshape(box, [-1, 4]))
        scores.append(box_score)

    boxes = np.concatenate(boxes, axis=0)
    scores = np.concatenate(scores, axis=0)

    # Scale normalized (x, y, x, y) coords to image pixels; image_shape is
    # reversed to (w, h) so each coordinate pair matches its axis.
    boxes *= np.concatenate([image_shape[::-1], image_shape[::-1]])

    # BUGFIX: the original wrapped this return in `try: ... except:` with a
    # bare except.  Returning a tuple of locals can never raise, so the
    # fallback branch was unreachable dead code and has been removed.
    return boxes, scores


if __name__ == '__main__':
    # Script entry: load one image, letterbox it to 608x608, normalize with
    # ImageNet statistics, then run it through an ONNX text-detection model
    # and print the raw outputs and timing.
    batch_size = 1

    # NOTE(review): this relative path is unused below — the InferenceSession
    # is created from an absolute path instead; confirm which one is current.
    model_path = './mjh_train_ReCTS_cn_en_v4-199.onnx'
    # classes = 4
    # labels = ["0","180","270","90"]
    # width_mult = 1.5


    # ImageNet mean/std normalization applied after ToTensor().
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225])
    # NOTE(review): `resize` is defined but never applied — letterbox_image
    # does the resizing instead.
    resize = transforms.Resize((608,608))
    # rotation = transforms.RandomRotation(180)

    # model = shufflenetv2(n_class=classes, width_mult=width_mult).cuda()
    # trainWeights = torch.load(model_path,map_location="cuda:0")
    # modelWeights = OrderedDict()
    # for k,v in trainWeights.items():
    #     name = k.replace('module.','')
    #     modelWeights[name] = v
    # model.load_state_dict(modelWeights)
    # model.eval()
    
    # Load and letterbox the test image; pad with white instead of gray.
    img_path = "/data1/lhw/workspace/OCR/utils/ONNX_Test/2.jpg"
    image = default_loader(img_path)
    image, scale = letterbox_image(image, (608, 608), fillValue=(255, 255, 255))

    image = transforms.ToTensor()(image)
    # image = torch.from_numpy(img)
    image = normalize(image)
    # image = image.cuda()
    # Add the batch dimension, then permute NCHW -> NHWC; presumably the
    # exported ONNX graph expects channels-last input — verify against the
    # printed input shape below.
    image = image.view(1,*image.size())
    image = Variable(image).permute(0,2,3,1)
    
    # onnx_inputs = copy.deepcopy(image).cpu()
    # onnx_file_path = './shuffle_v2_angle_net.onnx'

    # if not os.path.exists(onnx_file_path):
    #     print('\t>>write onnx: {}'.format(onnx_file_path))
    #     torch_out = torch.onnx.export(
    #             model,
    #             image,
    #             onnx_file_path,
    #             verbose=True,
    #             input_names=['input'],
    #             output_names=['output'],
    #             export_params=True)
    
    # Detach a CPU copy for ONNX Runtime and create the inference session.
    onnx_inputs = copy.deepcopy(image).cpu()
    start_time = time.time()
    session = onnxruntime.InferenceSession("/data1/lhw/workspace/OCR/utils/ONNX_Test/mjh_train_ReCTS_cn_en_v4-199.onnx")
    input_name = session.get_inputs()[0].name
    print('\t>>input: {}, {}, {}'.format(session.get_inputs()[0].name, session.get_inputs()[0].shape, session.get_inputs()[0].type))
    # _outputs = session.get_outputs()
    # for kk in range(len(_outputs)):
    #     _out = _outputs[kk]
    #     print('\t>>out-{}: {}, {}, {}'.format(kk, _out.name, _out.shape, _out.type))

    # Run inference; session.run returns a list with one entry per model
    # output.  Only the wall-clock time of the run itself is measured here.
    x = np.array(onnx_inputs).astype(np.float32)
    pre_start_time = time.time()
    p = session.run(None, {input_name: x})
    pre_end_time = time.time() - pre_start_time

    # NOTE(review): only the FIRST output is kept and then iterated; whether
    # its elements are the three YOLO feature maps depends on the exported
    # graph — confirm against the printed shapes.
    outs = p[0]

    print('============================================================================')
    # print('>>summary Output:', Onnx_str)
    print('onnx out: {} \n{}'.format(np.shape(outs), outs))
    print('onnx pre time: {}'.format(pre_end_time))
    for idx,result in enumerate(outs):
        result = np.array(result)
        print(result.shape)
    # np.save("./out_%s.npy" % idx,result)
    # print(result)
    # Materialize each output as a numpy array for downstream decoding.
    outs = [np.array(out) for out in outs]

############ load out
# out1 = np.load("./out_0.npy")
# out2 = np.load("./out_1.npy")
# out3 = np.load("./out_2.npy")
# outs = [out1,out2,out3]

### 2021-03-26: small text-box generation code




def box_layer(inputs, anchors, num_classes):
    """Decode the three YOLO output layers into boxes and per-class scores.

    NOTE(review): this is a verbatim duplicate of the box_layer defined
    earlier in this file; being defined later, it SHADOWS that definition
    for all subsequent calls.  Consider deleting one copy.
    """
    y1, y2, y3, image_shape, input_shape = inputs
    out = [y1, y2, y3]

    num_layers = len(out)
    # Layer 0 (coarsest grid) uses the largest anchors.
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    boxes = []
    scores = []
    input_shape = np.array(input_shape, dtype=np.float32)
    image_shape = np.array(image_shape, dtype=np.float32)
    # new_shape   = np.round(image_shape * np.min(input_shape/image_shape))
    # offset = (input_shape-new_shape)/2./input_shape
    # scale = input_shape/new_shape

    for lay in range(num_layers):
        box_xy, box_wh, box_confidence, box_class_probs = yolo_head(out[lay], anchors[anchor_mask[lay]], num_classes,
                                                                    input_shape)
        # box_xy = (box_xy - offset) * scale
        # box_wh = box_wh*scale

        # Per-box, per-class score = objectness * class probability.
        box_score = box_confidence * box_class_probs
        box_score = np.reshape(box_score, [-1, num_classes])

        # Convert center/size to corner coordinates.
        box_mins = box_xy - (box_wh / 2.)
        box_maxes = box_xy + (box_wh / 2.)
        box = np.concatenate([
            box_mins[..., 0:1],  # xmin
            box_mins[..., 1:2],  # ymin
            box_maxes[..., 0:1],  # xmax
            box_maxes[..., 1:2]  # ymax
        ], axis=-1)

        box = np.reshape(box, [-1, 4])

        boxes.append(box)

        scores.append(box_score)
    boxes = np.concatenate(boxes, axis=0)
    scores = np.concatenate(scores, axis=0)

    # Scale normalized (x, y, x, y) coords to image pixels; image_shape is
    # reversed to (w, h) so each coordinate pair matches its axis.
    boxes *= np.concatenate([image_shape[::-1], image_shape[::-1]])

    # NOTE(review): returning a tuple of locals cannot raise, so this bare
    # except branch is unreachable dead code.
    try:
        # return boxes,scores[...,1]
        return boxes, scores
    except:
        return boxes, scores[..., 0]

    # return concatenate([boxes,scores],axis=1)

# NOTE(review): everything below executes at module top level (outside the
# __main__ guard above) and references names never defined in this file:
# `image_shape`, `input_shape`, `anchors`, `num_classes`, `img`, and
# `drawBox` all raise NameError, and `outs` only exists when the script is
# run directly.  Presumably this chunk was pasted from another script —
# confirm where these names are supposed to come from before running.
results = [*outs,image_shape,input_shape]
box,scores = box_layer(results,anchors, num_classes)
print(box.shape)
print(scores.shape)

np.save("./boxes.npy",box)  # shape: [(19*19 + 38*38 + 76*76)*3 = 22743 ,4]
np.save("./scores.npy",scores)  # shape: [22743,3]



# Draw boxes whose best class score exceeds 0.01 and save the visualization.
res = drawBox(img,box[np.max(scores,axis=-1)>0.01])
cv2.imwrite("./res.jpg",res)
