import numpy as np
import cv2


# Display an image in a window and wait for a key press.
def cv_show(name, img):
    """Show *img* in a window titled *name*; block until any key is pressed."""
    cv2.imshow(name, img)
    cv2.waitKey(0)  # 0 = wait indefinitely
    cv2.destroyAllWindows()


# Sort detected contours into a predictable spatial order.
def sort_contours(cnts, method="left2right"):
    """Sort contours by the position of their bounding rectangles.

    Parameters
    ----------
    cnts : sequence of contours, as returned by ``cv2.findContours``.
    method : str
        One of ``"left2right"`` (default), ``"right2left"``,
        ``"top2bottom"``, ``"bottom2top"``.

    Returns
    -------
    tuple
        The contours sorted accordingly; an empty tuple for empty input.
    """
    if not cnts:
        # Guard: zip(*sorted(zip(...))) raises ValueError on an empty sequence.
        return ()
    reverse = method in ("right2left", "bottom2top")
    # Sort key index inside the bounding rect: 0 = x, 1 = y.
    axis = 1 if method in ("top2bottom", "bottom2top") else 0
    bounding_rects = [cv2.boundingRect(c) for c in cnts]  # each is (x, y, w, h)
    (cnts, bounding_rects) = zip(*sorted(zip(cnts, bounding_rects),
                                         key=lambda cb: cb[1][axis],
                                         reverse=reverse))
    return cnts


# Load the digit template image as grayscale (flag 0 = IMREAD_GRAYSCALE).
template = cv2.imread("images\\template.png", 0)
if template is None:
    # cv2.imread silently returns None on a missing/unreadable file, which
    # would otherwise surface as a cryptic error inside cv2.threshold.
    raise FileNotFoundError("cannot read images\\template.png")
# Binarize with inversion: digits become white on a black background,
# which is what findContours expects.
template = cv2.threshold(template, 127, 255, cv2.THRESH_BINARY_INV)[1]
cv_show("template", template)

# Detect the outer contour of each template digit.
# OpenCV 4.x returns (contours, hierarchy); 3.x returned (image, contours, hierarchy).
if int(cv2.__version__.split('.')[0]) >= 4:
    temp_cnts, hierarchy = cv2.findContours(template.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    ref, temp_cnts, hierarchy = cv2.findContours(template.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# findContours gives no ordering guarantee; sort left-to-right so that the
# i-th contour corresponds to digit i of the template.
temp_cnts = sort_contours(temp_cnts, method="left2right")

# Crop each digit out of the template, resize to a fixed size, and store it
# keyed by its value (index == digit because of the sort above).
temp_digits = {}
for (i, c) in enumerate(temp_cnts):
    (x, y, w, h) = cv2.boundingRect(c)
    digit = template[y:y+h, x:x+w]
    # Fixed size so that matchTemplate inputs always line up with these crops.
    digit = cv2.resize(digit, (60, 100))
    temp_digits[i] = digit


# Load the image to be recognized.
image = cv2.imread("images\\card.png")
if image is None:
    # cv2.imread silently returns None on a missing/unreadable file.
    raise FileNotFoundError("cannot read images\\card.png")
print(np.array(image).shape)
# Resize to a fixed width of 450 px (height scaled to preserve aspect ratio)
# so the hard-coded region filters used later work for any input resolution.
image = cv2.resize(image, (450, int(450 * image.shape[0] / image.shape[1])))
print(np.array(image).shape)

# Grayscale for the gradient / threshold operations below.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)


def _normalized_sobel(src, dx, dy):
    """Absolute Sobel gradient along one axis, min-max rescaled to uint8 0-255.

    The linear normalization maps the gradient's actual value range onto the
    full 0-255 range to boost contrast before morphology/thresholding.
    """
    g = np.absolute(cv2.Sobel(src, ddepth=cv2.CV_32F, dx=dx, dy=dy, ksize=-1))
    lo, hi = np.min(g), np.max(g)
    if hi == lo:
        # Flat input would cause a division by zero; return an all-zero map.
        return np.zeros(g.shape, dtype="uint8")
    return (255 * ((g - lo) / (hi - lo))).astype("uint8")


# Edge detection: gradients in X and Y, then a 50/50 blend of both.
gradX = _normalized_sobel(gray, 1, 0)
gradY = _normalized_sobel(gray, 0, 1)
grad = cv2.addWeighted(gradX, 0.5, gradY, 0.5, 0)

# Morphological close joins adjacent digit strokes into solid blocks, which
# reduces the number of contours to examine and simplifies filtering later.
rectKernal = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 3))
close = cv2.morphologyEx(grad, cv2.MORPH_CLOSE, rectKernal)

# Binarize; THRESH_OTSU picks the threshold automatically (pass 0 as threshold).
thresh = cv2.threshold(close, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# Detect the contours of the candidate blocks in the binarized image.
# OpenCV 4.x returns (contours, hierarchy); 3.x returned a leading image too.
if int(cv2.__version__.split('.')[0]) >= 4:
    img_cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    ref, img_cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Keep only the bounding boxes that sit in the card-number region; the
# position and height limits are tuned for the 450-px-wide resized image.
targets = []  # bounding rectangles (x, y, w, h) of the useful regions
for contour in img_cnts:
    x, y, w, h = cv2.boundingRect(contour)
    if x < 225 and 170 < y < 220 and 15 <= h <= 25:
        print(f"target bound: x = {x}, y = {y}, w = {w}, h = {h}")
        targets.append((x, y, w, h))
# Order the kept rectangles left-to-right by their x coordinate.
targets.sort(key=lambda rect: rect[0])
print(targets)


result = []  # final result: one list of recognized digit strings per target box
# Recognize the digits inside each target rectangle.
for (X, Y, W, H) in targets:

    # Crop the region from the grayscale image with a 5-px margin. Clamp the
    # start indices to 0: a box within 5 px of the border would otherwise
    # produce a negative index, which wraps around and yields a wrong/empty
    # slice (crashing later in cv2.resize).
    group = gray[max(Y - 5, 0):Y + H + 5, max(X - 5, 0):X + W + 5]

    # Binarize the crop (Otsu picks the threshold automatically).
    group = cv2.threshold(group, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

    # Find each digit's contour inside the crop.
    # Fixed: the OpenCV>=4 branch previously called group.cpoy() (typo),
    # which raised AttributeError at runtime.
    if int(cv2.__version__.split('.')[0]) >= 4:
        group_digit_cnts, hierarchy = cv2.findContours(group.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    else:
        group_, group_digit_cnts, hierarchy = cv2.findContours(group.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Sort so digits are recognized in left-to-right reading order.
    group_digit_cnts = sort_contours(group_digit_cnts, method="left2right")

    groupResult = []  # recognized digits (as strings) for this box
    for c in group_digit_cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        single = group[y:y+h, x:x+w]
        # Resize to the same fixed size as the stored digit templates.
        single = cv2.resize(single, (60, 100))

        # Template matching: score this digit crop against all 10 templates.
        scores = []
        for digit_T in temp_digits.values():
            match = cv2.matchTemplate(single, digit_T, cv2.TM_CCOEFF)
            # For TM_CCOEFF the maximum of the response map is the match score.
            (_, score, _, _) = cv2.minMaxLoc(match)
            scores.append(score)

        # The index of the best-scoring template is the recognized digit.
        groupResult.append(str(np.argmax(scores)))

    # Annotate the original image: box outline plus the recognized number
    # rendered just above it.
    cv2.rectangle(image, (X, Y), (X + W, Y + H), (0, 0, 255), 1)
    cv2.putText(image, "".join(groupResult), (X, Y - 12), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)

    result.append(groupResult)


print(result)
cv_show("result", image)