# -*- coding: utf-8 -*-
import os

import cv2
import numpy as np
from imutils.perspective import four_point_transform

from answer.ocr_sdk import OcrSdk

# Bubble-mark layout parameters on the answer sheet, expressed as
# fractions of a reference answer-area size of 345 (w) x 317 (h) px.
hor_space = 16 / 345  # horizontal gap between columns (~15-16 px in the reference layout)
hor_que_space = 14.7 / 345  # horizontal width of a single answer cell
ver_space = 26 / 317  # vertical gap between rows (~15 px per the original note)
ver_que_space = 39 / 317  # vertical height of one answer block
# Sheet edge margin parameters (same reference size)
start_x_margin = 2 / 345
start_y_margin = 15 / 317
# Hand-written input area parameters, as fractions of a
# reference input-area size of 362 (w) x 138 (h) px.
ver_per = 51 / 138
hor_per = 204 / 362


class Answer(object):
    """Optical-mark reader for a photographed bubble answer sheet.

    Pipeline (driven by :meth:`start`):
      1. ``read_img`` - load the photo and perspective-correct it using
         four locator circles found with a Hough transform.
      2. ``transform_again`` - split the corrected sheet at the middle
         divider line into the hand-written input area and the
         bubble-answer area.
      3. ``get_input_text`` - OCR the hand-written input area.
      4. ``cut_ans_img`` / ``get_cut_ans`` - cut the answer area into a
         4x5 grid of blocks and detect the filled bubbles per block.

    NOTE(review): the three ``cv2.findContours`` calls unpack three
    return values, which matches OpenCV 3.x; OpenCV 4.x returns only
    two values -- confirm the deployed cv2 version.
    """

    def __init__(self, img_path):
        """Remember the image path and derive the output file names.

        :param img_path: path to the answer-sheet photo.
        """
        print("cv2-version========", cv2.__version__)
        self.img_path = img_path
        self.click_img = None  # last image shown with wait=True (read by the mouse callback)
        self.file_name = os.path.basename(img_path)  # full file name
        self.file_path = os.path.dirname(img_path)  # directory containing the image
        # print("file_name", file_name)
        img_name = self.file_name.split(".")[0]
        img_type = self.file_name.split(".")[1]
        self.points_range_file_name = "{}_{}.{}".format(img_name, "points_range", img_type)  # output name for the selected-answer (range) image
        self.points_threshold_file_name = "{}_{}.{}".format(img_name, "points_threshold", img_type)  # output name for the selected-answer (threshold) image
        self.answer_result_file_name = "{}_{}.{}".format(img_name, "answer_result", img_type)  # output name for the recognition-result image

    def start(self):
        """Run the full pipeline and return the recognised answers.

        :return: tuple of (answer dicts, OCR texts, source file name,
                 and the three output file names from ``__init__``).
        """
        gray_trans, img_trans = self.read_img()
        ans_trans, input_trans, img_trans2 = self.transform_again(gray_trans, img_trans)
        texts = self.get_input_text(input_trans)
        cut_img_list = self.cut_ans_img(ans_trans, img_trans2)
        ans_list = []
        for index, cut_img in enumerate(cut_img_list):  # compute results for the 20 cut blocks
            if index == 16:  # test code: only block 16 is processed -- leftover debug filter, remove to process all blocks
                cut_ans_list = self.get_cut_ans(index, cut_img[0], cut_img[1])
                ans_list.extend(cut_ans_list)
        # for ans in ans_list:
        #     print(ans)
        return ans_list, texts, self.file_name, self.points_range_file_name, self.points_threshold_file_name, self.answer_result_file_name

    # Read the image and perspective-transform it using the four locator circles
    def read_img(self):
        """Load the photo, find the four locator circles and warp the sheet.

        :return: (gray_trans, img_trans) - the perspective-corrected
                 grayscale and colour images.
        """
        # Load and show the image
        print("self.img_path==============", self.img_path)
        img = cv2.imread(self.img_path)
        # NOTE: img.shape is (rows, cols, channels), so `wid` actually holds
        # the height and `hei` the width; cv2.resize expects (width, height),
        # so the call below correctly halves both dimensions.
        wid, hei, _ = img.shape
        img = cv2.resize(img, (int(hei / 2), int(wid / 2)), 0, 0)
        # img = cv2.resize(img, (500, 700), 0, 0)
        self.show_img("img", img)
        # 1. Denoise (blur to reduce small blemishes)
        blur = cv2.blur(img, (4, 4))
        self.show_img("blur", blur)
        # 2. Grayscale, i.e. drop colour (like an old photo)
        gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
        self.show_img("gray", gray)
        # 3. Hough circle detection - locate the sheet's circular markers
        circles = cv2.HoughCircles(gray.copy(), cv2.HOUGH_GRADIENT, 1, 200, param1=250, param2=15, minRadius=5,
                                   maxRadius=20)
        circles = np.round(circles[0, :]).astype('int')
        circles = sorted(circles, key=lambda x: x[1])  # sort by y
        top_circles = sorted(circles[0: 2], key=lambda x: x[0])  # sort by x
        bottom_circles = sorted(circles[2: 4], key=lambda x: x[0])  # sort by x
        loc_circles = np.vstack((top_circles, bottom_circles))  # reassembled locator circles (TL, TR, BL, BR)
        # Walk the four locator circles and collect the inner corner of each
        four_points = []
        for idx, (x, y, r) in enumerate(loc_circles):
            # Draw the circle onto the output image
            cv2.circle(img, (x, y), r, (0, 255, 0), 4)
            if idx == 0:
                four_points.append([x + r, y + r])
            elif idx == 1:
                four_points.append([x - r, y + r])
            elif idx == 2:
                four_points.append([x + r, y - r])
            elif idx == 3:
                four_points.append([x - r, y - r])
        # Perspective-transform using the locator-circle corner coordinates
        gray_trans = four_point_transform(gray, np.array(four_points))
        img_trans = four_point_transform(img, np.array(four_points))
        # Show the annotated image
        self.show_img("circle", img)
        return gray_trans, img_trans

    # Perspective-transform again based on the middle divider line
    def transform_again(self, gray_trans, img_trans):
        """Split the corrected sheet at its middle divider line.

        :param gray_trans: perspective-corrected grayscale sheet
               (drawn onto in place for debugging).
        :param img_trans: perspective-corrected colour sheet.
        :return: (ans_trans, input_trans, img_trans2) - grayscale answer
                 area, colour input area, colour answer area.
        """
        gaussian_bulr = cv2.GaussianBlur(gray_trans, (5, 5), 0)
        self.show_img("transform-gaussian", gaussian_bulr)
        edged = cv2.Canny(gaussian_bulr, 75, 100)  # edge detection: gradients below arg2 are dropped, above arg3 are kept as edges, in between decided by connectivity
        self.show_img("transform-edged", edged)
        # 1. Find contours
        image, cts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # 2. Store each contour in path_list as a dict {c: contour, peri: perimeter}
        path_list = []
        for c in cts:
            peri = 0.01 * cv2.arcLength(c, True)
            path_list.append({"c": c, "peri": peri})
        # 3. Sort the contours by perimeter, longest first
        path_sort = sorted(path_list, key=lambda x: x['peri'], reverse=True)
        # print("path_sort", path_sort)
        # Show the contour with the longest perimeter (assumed to be the divider)
        cv2.drawContours(gray_trans, [path_sort[0]['c']], -1, (0, 0, 255), 3)
        self.show_img("draw_contours", gray_trans)
        # 4. Take the bounding rectangle of that contour
        x, y, w, h = cv2.boundingRect(path_sort[0]['c'])
        cv2.rectangle(gray_trans, (x, y), (x + w, y + h), (0, 255, 0), 2)
        self.show_img("rectangle", gray_trans)
        # print("rect.shape()", rect.shape)
        my, mx = gray_trans.shape
        # 5. Build two new 4-point quads from the rectangle's bottom edge
        ans_four_points = [[0, y + h], [mx, y + h], [0, my], [mx, my]]  # answer-area corner points
        input_four_points = [[0, 0], [mx, 0], [0, y + h], [mx, y + h]]  # input-area corner points
        # 6. Perspective-transform once more
        ans_trans = four_point_transform(gray_trans, np.array(ans_four_points))
        img_trans2 = four_point_transform(img_trans, np.array(ans_four_points))
        input_trans = four_point_transform(img_trans, np.array(input_four_points))
        return ans_trans, input_trans, img_trans2

    # Read the hand-written text from the input area
    def get_input_text(self, img_trans2):
        """Crop the writable region of the input area and OCR it.

        :param img_trans2: colour image of the input (top) area.
        :return: text recognised by the OCR SDK.
        """
        self.show_img("get_input_text", img_trans2)
        mhei, mwid, _ = img_trans2.shape
        print("img_trans2.shape", img_trans2.shape)  # e.g. 138, 362; crop region 204 x 51
        # Crop the input image to the writable region
        input_img = img_trans2[0:int(ver_per * mhei), 0:int(hor_per * mwid)]
        self.show_img("input_img", input_img)
        ocr = OcrSdk()
        texts = ocr.read_text_cv(input_img)
        return texts

    # Cut the answer area into blocks
    def cut_ans_img(self, gray_trans2, img_trans2):
        """Split the answer area into a 5-row x 4-column grid.

        :param gray_trans2: grayscale answer area.
        :param img_trans2: colour answer area (copied, not mutated).
        :return: list of 20 [gray_block, colour_block] pairs, row-major.
        """
        img_trans2 = img_trans2.copy()
        # self.show_img("img_trans2", img_trans2, True)
        mhei, mwid, _ = img_trans2.shape
        cut_img_list = []
        for j in range(5):
            for i in range(4):
                hor_wid = mwid / 4
                ver_wid = mhei / 5
                input_img = img_trans2[int(j * ver_wid):int((j + 1) * ver_wid),
                            int(i * hor_wid):int((i + 1) * hor_wid)]  # [height, width]
                input_gray = gray_trans2[int(j * ver_wid):int((j + 1) * ver_wid),
                             int(i * hor_wid):int((i + 1) * hor_wid)]  # [height, width]
                # self.show_img("cut_ans_img", input_img, True)
                cut_img_list.append([input_gray, input_img])
        return cut_img_list

    # Extract the selected answers from one block
    def get_cut_ans(self, cut_index, gray_trans2, img_trans2):
        """Detect the filled bubbles inside one grid block.

        :param cut_index: 0-based index of the block in the 4x5 grid.
        :param gray_trans2: grayscale block.
        :param img_trans2: colour block.
        :return: list of 5 dicts {'num': question number, 'ans': [choices]}.
        """
        self.show_img("get_cut_ans1", img_trans2,True)
        upper = np.array([150, 150, 156])  # colour upper bound (BGR)
        lower = np.array([80, 81, 82])  # colour lower bound (BGR)
        mask = cv2.inRange(img_trans2, lower, upper)
        color_mask = cv2.bitwise_and(gray_trans2, gray_trans2, mask=mask)
        self.show_img("get_cut_ans2", color_mask,True)

        color_mask = cv2.copyMakeBorder(color_mask, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
        img_trans2 = cv2.copyMakeBorder(img_trans2, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
        self.show_img("copyMakeBorder", color_mask, True)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 6))
        dil = cv2.dilate(color_mask, kernel)  # dilate (grows white regions, shrinks black ones)
        # self.show_img("dilate", dil)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 6))
        dil = cv2.dilate(dil, kernel)  # dilate again to merge nearby marks
        # self.show_img("dilate", dil)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (6, 12))
        dst = cv2.erode(dil, kernel)  # erode
        self.show_img("erode", dst, True)
        # Find the cell border contours
        r_image, r_cnt, r_hierarchy = cv2.findContours(dst.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # print("number of borders==============", len(r_cnt))
        cv2.drawContours(img_trans2, r_cnt, -1, (0, 0, 255), 1)
        self.show_img("cut-color_mask", img_trans2, True)
        # Draw the bounding rectangle of the first contour
        # NOTE(review): raises IndexError when no contour is found -- confirm upstream guarantees
        bd_x, bd_y, bd_w, bd_h = cv2.boundingRect(r_cnt[0])
        cv2.rectangle(img_trans2, (bd_x, bd_y), (bd_x + bd_w, bd_y + bd_h), (0, 0, 255), 1)  # red bounding box
        self.show_img("cur-rectangle", img_trans2, True)
        # Crop again to just inside the detected border
        bd_cut_img = img_trans2[bd_y+2:bd_y + bd_h-2, bd_x:bd_x + bd_w]  # [height, width]
        self.show_img("bd_cut_img", bd_cut_img, True)
        bd_cut_mh, bd_cut_mw, _ = bd_cut_img.shape
        cut_ans_list = []
        for h_num in range(5):
            question_cut_img = bd_cut_img[0:bd_cut_mh,
                               int(h_num * bd_cut_mw / 5) + 2:int((h_num + 1) * bd_cut_mw / 5) - 2]
            self.show_img("question_cut_img", question_cut_img)
            ans_obj = {}
            ans_obj['num'] = cut_index * 5 + (h_num + 1)
            print("num======", ans_obj['num'])
            sel_ans = self.get_sel_que_ans(question_cut_img)
            ans_obj['ans'] = sel_ans
            cut_ans_list.append(ans_obj)
        # print("cut_ans_list", cut_ans_list)
        return cut_ans_list

    # Get one question's selected answers
    def get_sel_que_ans(self, question_cut_img):
        """Find filled bubbles in one question strip.

        :param question_cut_img: colour strip for a single question
               (drawn onto in place for debugging).
        :return: list of selected choices ("num" / "A".."D", possibly None).
        """
        que_cut_gray = cv2.cvtColor(question_cut_img.copy(), cv2.COLOR_BGR2GRAY)
        self.show_img("get_sel_que_ans-gaussian", que_cut_gray, True)
        ret, thresh2 = cv2.threshold(que_cut_gray.copy(), 140, 255, cv2.THRESH_BINARY_INV)
        self.show_img("get_sel_que_ans_ostu2", thresh2, True)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 2))
        dst = cv2.erode(thresh2, kernel)  # erode
        self.show_img("get_sel_que_ans_erode", dst, True)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 2))
        dil = cv2.dilate(dst, kernel)  # dilate (grows white regions, shrinks black ones)
        self.show_img("dilate", dil, True)
        r_image, r_cnt, r_hierarchy = cv2.findContours(dil.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        # print("number of contours found----------------:", len(r_cnt))
        cv2.drawContours(question_cut_img, r_cnt, -1, (255, 0, 0), 1)  # answer outlines (original note said "green", but (255, 0, 0) is blue in BGR)
        self.show_img("get_sel_que_ans", question_cut_img, True)
        mh, mw, _ = question_cut_img.shape
        # Mark every contour that looks like a filled bubble
        sel_ans = []
        for cxx in r_cnt:
            # Bounding rectangle of this contour
            x, y, w, h = cv2.boundingRect(cxx)
            ar = w / float(h)  # aspect ratio -- computed but never used below
            # keep marks that are large enough and sit below the number row
            if (w >= 6 or h >= 5) and y > mh / 5 - 2:
                cv2.rectangle(question_cut_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                ans = self.compute_answer(y + h / 2, mh)
                sel_ans.append(ans)
        print("sel_ans========", sel_ans)
        self.show_img("sel_point_threshold", question_cut_img, True)
        return sel_ans

    # Map a vertical position to an answer letter
    def compute_answer(self, sel, all):
        """Map a mark's vertical centre to its row in the strip.

        The strip is divided into 5 equal rows: the question number row,
        then choices A through D.

        :param sel: vertical centre of the detected mark, in pixels.
        :param all: total strip height, in pixels.
        :return: "num", "A", "B", "C" or "D"; returns None (implicitly)
                 when ``sel / all`` >= 1.
        """
        percent = sel / all
        if percent < 1 / 5:
            return "num"
        elif percent < 2 / 5:
            return "A"
        elif percent < 3 / 5:
            return "B"
        elif percent < 4 / 5:
            return "C"
        elif percent < 1:
            return "D"

    # Draw the result image
    def save_result_img(self, card_list, reduce_list, img_trans2):
        """Draw a box around each listed answer and save the image.

        :param card_list: per-question bubble start positions, each like
               {'key': 1, 'value': [x, y]}.
        :param reduce_list: answers to highlight, each like [1, 'B'].
        :param img_trans2: colour answer-area image, drawn onto in place.
        """
        ans_obj = {"A": 1, "B": 2, "C": 3, "D": 4}
        mh, mw, _ = img_trans2.shape
        for ans in reduce_list:  # ans: [1, 'B']
            card = card_list[ans[0] - 1]  # card: {'key': 1, 'value': [2.0347826086956524, 16.37223974763407]}
            start_poss = card.get("value")
            h = int(ver_que_space * mh / 5)
            w = int(hor_que_space * mw)
            x = int(start_poss[0])
            if ans[1] in ans_obj.keys():
                y = int(start_poss[1] + ans_obj.get(ans[1]) * h)
                cv2.rectangle(img_trans2, (x, y), (x + w, y + h), (255, 0, 0), 1)  # answer box (original note said "green", but (255, 0, 0) is blue in BGR)
        self.save_image(img_trans2, self.answer_result_file_name, "answer result")

    # Save an image with a caption
    def save_image(self, img_res, img_name, text="answer img"):
        """Stamp `text` onto `img_res` and write it next to the source image.

        :param img_res: image to save (modified in place by putText).
        :param img_name: output file name.
        :param text: caption drawn onto the image.
        """
        mh, mw, _ = img_res.shape
        text_x = int(mw / 4)
        text_y = int((ver_que_space + ver_space) * mh + 10)
        cv2.putText(img_res, text, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (126, 211, 33), 1)
        file_path = os.path.join(self.file_path, img_name)
        cv2.imwrite(file_path, img_res)

    def show_img(self, title, img, wait=False):
        """Show `img` in a window; when `wait` is True, block until a key
        press and install a click handler that prints pixel values.
        """
        cv2.imshow(title, img)
        if wait:
            self.click_img = img
            cv2.setMouseCallback(title, self.mouse_click)
            cv2.waitKey(0)

    def mouse_click(self, event, x, y, flags, param):
        """Mouse callback: print the BGR value under a left click."""
        if event == cv2.EVENT_LBUTTONDOWN:
            print("mouse_click======EVENT_LBUTTONDOWN", event, x, y)
            # NOTE(review): .any() raises AttributeError while click_img is
            # still None -- an `is not None` check would be safer.
            if self.click_img.any(): print("BGR:", self.click_img[y, x])


if __name__ == '__main__':
    # Manual smoke test: run the full recognition pipeline on a sample sheet.
    sheet_reader = Answer("../../media/test/1622170544478.jpg")
    # sheet_reader = Answer("../../media/images/mini_t7.jpg")
    sheet_reader.start()
