import cv2
import numpy as np
import torch
import copy

def __get_split_merge__(img):
    """Flatten a two-row (double-layer) plate image into a single row.

    The top ~5/12 band and the bottom ~2/3 band are cropped (the bands
    overlap on purpose so characters near the split are not cut), the upper
    band is resized to the lower band's size, and the two are concatenated
    side by side so the OCR model can read them as one line.
    """
    height, _, _ = img.shape
    upper_band = img[:int(5 / 12 * height), :]
    lower_band = img[int(1 / 3 * height):, :]
    target_size = (lower_band.shape[1], lower_band.shape[0])  # (w, h) for cv2
    upper_band = cv2.resize(upper_band, target_size)
    return np.hstack((upper_band, lower_band))

# Plate colour class names, in the colour head's output-index order
# (black, blue, green, white, yellow).
color=['黑色','蓝色','绿色','白色','黄色']    
# CTC character table: index 0 ('#') is the blank symbol; the rest are
# province abbreviations, letters/digits and special plate suffixes.
plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品"
# Normalisation statistics applied in __image_pre_processing__.
mean_value, std_value=(0.588,0.193)

# 从预测序列中解码车牌字符识别结果
def __decode_ctc_plate__(predictions):
    """
    对车牌字符识别结果进行解码，去除连续重复的字符，并记录字符在原始预测序列中的位置。

    参数：
    predictions：预测的字符序列。

    返回值：
    unique_predictions：去除了连续重复字符后的字符序列。
    indices：去重后字符在原始预测序列中的位置索引。
    """
    previous_char = 0  # 用于记录前一个字符
    unique_predictions = []  # 用于存储去重后的字符
    indices = []  # 用于存储去重后字符在原始序列中的索引

    for index, current_char in enumerate(predictions):
        # 如果当前字符不是空白且不同于前一个字符
        if current_char != 0 and current_char != previous_char:
            unique_predictions.append(current_char)
            indices.append(index)
        previous_char = current_char  # 更新前一个字符为当前字符

    return unique_predictions, indices

# Convert a plate crop into the normalised NCHW tensor the OCR model expects.
def __image_pre_processing__(img, device):
    """Resize, normalise and tensorise a plate crop for the OCR model.

    Args:
        img: HxWx3 image (BGR as read by OpenCV — assumed; confirm with caller).
        device: torch device the tensor is moved to.

    Returns:
        A float32 tensor of shape (1, 3, 48, 168), scaled to [0, 1] and
        normalised with the module-level mean/std.
    """
    resized = cv2.resize(img, (168, 48)).reshape(48, 168, 3)
    normalised = (resized.astype(np.float32) / 255. - mean_value) / std_value
    chw = normalised.transpose(2, 0, 1)  # HWC -> CHW
    tensor = torch.from_numpy(chw).to(device)
    return tensor.unsqueeze(0)  # add the batch dimension

# Run the OCR (and optional colour) model on a single plate crop.
def __model_ocr__(img, device, model, is_color=False):
    """Recognise the characters (and optionally the colour) of a plate crop.

    Args:
        img: plate ROI image; deep-copied so the caller's array is untouched.
        device: torch device used for inference.
        model: OCR network returning character logits, or a
            (char_logits, colour_logits) pair when it also predicts colour.
        is_color: when True, also decode the plate colour.

    Returns:
        (plate, prob) — decoded plate string and per-character confidences —
        or (plate, prob, colour_name, colour_conf) when ``is_color`` is True.
    """
    # Deep-copy so preprocessing never mutates the caller's ROI.
    model_input = __image_pre_processing__(copy.deepcopy(img), device)
    if is_color:
        preds, color_preds = model(model_input)
        color_preds = torch.softmax(color_preds, dim=-1)
        color_conf, color_index = torch.max(color_preds, dim=-1)
        color_conf = color_conf.item()
    else:
        preds = model(model_input)
    preds = torch.softmax(preds, dim=-1)
    prob, index = preds.max(dim=-1)
    index = index.view(-1).detach().cpu().numpy()
    prob = prob.view(-1).detach().cpu().numpy()

    # Collapse the raw CTC sequence and keep only the matching confidences.
    decoded_preds, decoded_index = __decode_ctc_plate__(index)
    prob = prob[decoded_index]
    plate = "".join(plateName[i] for i in decoded_preds)
    if is_color:
        # Plate string, per-char confidences, colour name and its confidence.
        return plate, prob, color[color_index.item()], color_conf
    return plate, prob

# 将识别结果包装为字典格式
def __wrap_as_dict__(plate_no, plate_color, color_conf, rect, detect_conf, roi_height, plate_type):
    result = {}
    # 将车牌信息整合到字典中
    result['plateNo'] = plate_no            # 车牌号
    result['plateColor'] = plate_color      # 车牌颜色
    result['rect'] = rect                   # 车牌roi区域
    result['detectConf'] = detect_conf      # 检测区域得分
    result['roiHeight'] = roi_height        # 车牌高度
    result['colorConf'] = color_conf        # 颜色得分
    result['plateType'] = plate_type        # 单双层 0单层 1双层
    return result

# Full recognition pipeline: crop each detection, OCR it, package the results.
def recognize(img, detection_outputs, plate_rec_model, device):
    """Recognise every detected plate in ``img``.

    Args:
        img: full frame (H x W x 3 array) the detections refer to.
        detection_outputs: iterable of detection tensors, each squeezable to
            [x1, y1, x2, y2, conf, ..., label] — label 0 = single-layer,
            1 = double-layer plate (layout assumed from usage; confirm with
            the detector's output format).
        plate_rec_model: OCR model passed through to ``__model_ocr__``.
        device: torch device used for inference.

    Returns:
        A list of result dicts (see ``__wrap_as_dict__``).
    """
    result = []
    height, width = img.shape[0], img.shape[1]
    for detection in detection_outputs:
        detection = detection.squeeze().cpu().numpy().tolist()

        # Clamp the box to the frame: negative or oversized detector
        # coordinates would otherwise produce an empty or wrapped-around ROI.
        x1, y1, x2, y2 = (int(v) for v in detection[:4])
        rect = [max(0, x1), max(0, y1), min(width, x2), min(height, y2)]
        label = detection[-1]

        # Crop the plate ROI; skip degenerate boxes with no pixels.
        roi = img[rect[1]:rect[3], rect[0]:rect[2]]
        if roi.size == 0:
            continue

        # Double-layer plates are split and re-joined into a single row.
        if int(label):
            roi = __get_split_merge__(roi)

        # Detection confidence for this box.
        detect_conf = detection[4]

        # OCR with colour classification enabled.
        plate_no, _, plate_color, color_conf = __model_ocr__(
            roi, device, plate_rec_model, is_color=True
        )

        result.append(
            __wrap_as_dict__(
                plate_no,
                plate_color,
                color_conf,
                rect,
                detect_conf,
                roi.shape[0],
                int(label),
            )
        )
    return result