from PyQt5.QtWidgets import QLabel,QWidget,QApplication,QVBoxLayout,QHBoxLayout,QPushButton,QLabel,QRadioButton
from PyQt5.QtCore import QRect,Qt
from PyQt5.QtGui import QImage,QPixmap,QPainter,QPen,QColor,QGuiApplication
import sys
import cv2
import PyQt5
import imutils
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import getpath
import os
import logging
# Correct answer index (0-4, i.e. option A-E) for each of the 5 questions
# on the bubble sheet; question number -> correct bubble column.
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
class Cv1(QWidget):
    """Bubble-sheet (OMR) grading demo.

    Loads an answer-card image and runs it through the classic OpenCV
    pipeline (grayscale -> Canny edges -> 4-point perspective warp ->
    Otsu threshold -> per-question bubble counting), displaying any
    intermediate stage in a QLabel via radio buttons.
    """

    # Radio-button label -> instance attribute holding that stage's image.
    _STAGE_ATTRS = {
        "origin": "img",
        "gray": "gray",
        "edged": "edged",
        "paper": "paper",
        "warped": "warped",
        "thresh": "thresh",
    }

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the UI: stage-selector radios, the image label, a test button."""
        v = QVBoxLayout(self)

        self.lb = QLabel(self)
        self.lb.setScaledContents(True)
        self.lb.setCursor(Qt.CrossCursor)

        one = QWidget()
        h = QHBoxLayout(one)
        for name in ["origin", "gray", "edged", "paper", "warped", "thresh"]:
            rb = QRadioButton(name)
            h.addWidget(rb)
            # Bind the loop variable as a default argument so each radio
            # reports its own label (avoids the late-binding closure pitfall).
            rb.pressed.connect(lambda val=name: self.field_click(val))
        v.addWidget(one)
        v.addWidget(self.lb)

        button = QPushButton("test1")
        v.addWidget(button)
        button.clicked.connect(self.test1)
        self.load()

    def field_click(self, val):
        """Display the pipeline stage named *val*.

        Fix: the original raised AttributeError when a stage was selected
        before its image existed (e.g. "edged" before edge() ran); such
        clicks are now logged and ignored.
        """
        attr = self._STAGE_ATTRS.get(val)
        img = getattr(self, attr, None) if attr else None
        if img is None:
            logging.warning("stage %r has not been computed yet", val)
            return
        self.show_img(img)

    def test1(self):
        """Fixed-threshold contour demo: binarize at 127 and outline everything."""
        self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(self.gray, 127, 255, cv2.THRESH_BINARY)
        self.thresh = thresh
        # NOTE: local renamed from `contours` to avoid shadowing the
        # imutils.contours module imported at file scope.
        cnts, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        # Draw on a copy so the original image is left untouched.
        self.paper = self.img.copy()
        # -1 draws every contour, thickness 2. The tuple is interpreted in
        # the image's channel order (RGB after load()'s conversion).
        cv2.drawContours(self.paper, cnts, -1, (0, 0, 255), 2)

    def button_click(self):
        """Reload the source image and run the full edge/grade pipeline."""
        self.load()
        self.edge()

    def load(self):
        """Read card1.jpg from getpath's directory and display it as RGB."""
        self.img = cv2.imread(os.path.join(getpath.getpath(), 'card1.jpg'))
        if self.img is None:
            # imread returns None on a missing/unreadable file; the original
            # would then crash inside cvtColor with an opaque error.
            logging.error("could not read card1.jpg")
            return
        # Convert in place from OpenCV's BGR to the RGB order QImage expects.
        cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB, self.img)
        self.show_img(self.img)

    def show_cnts(self):
        """Placeholder for a per-bubble mask visualisation (not implemented)."""
        pass

    def show_img(self, img):
        """Render a numpy image in the label.

        Accepts HxWx3 uint8 (assumed RGB — see load()) or HxW grayscale.
        """
        logging.info(type(img))
        logging.info(img.shape)
        # QImage reads the raw buffer; make sure it is contiguous so the
        # row stride really is width (or 3*width) bytes.
        img = np.ascontiguousarray(img)
        height, width = img.shape[:2]
        if img.ndim == 3:
            qimg = QImage(img.data, width, height, 3 * width, QImage.Format_RGB888)
        else:
            qimg = QImage(img.data, width, height, width, QImage.Format_Grayscale8)
        self.lb.setPixmap(QPixmap.fromImage(qimg))

    def edge(self):
        """Full pipeline: find the sheet, warp it flat, grade the bubbles.

        Stores every intermediate stage on self (gray, blurred, edged,
        paper, warped, thresh) so field_click can display them.
        """
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        self.gray = gray
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        self.blurred = blurred
        self.edged = cv2.Canny(blurred, 75, 200)

        edged = self.edged
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        self.cnts = cnts
        docCnt = None

        # Largest 4-vertex contour is assumed to be the sheet outline.
        if len(cnts) > 0:
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
            for c in cnts:
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                if len(approx) == 4:
                    docCnt = approx
                    break

        if docCnt is None:
            # Fix: the original dereferenced docCnt unconditionally and
            # crashed with AttributeError when no quadrilateral was found.
            logging.error("no 4-point document contour found")
            return

        # Top-down view of the sheet, in color (for drawing) and gray
        # (for thresholding).
        self.paper = four_point_transform(self.img, docCnt.reshape(4, 2))
        self.warped = four_point_transform(self.gray, docCnt.reshape(4, 2))
        warped = self.warped
        paper = self.paper

        # Otsu picks the threshold; INV makes filled bubbles white.
        thresh = cv2.threshold(warped, 0, 255,
            cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        self.thresh = thresh

        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # Keep only roughly square, sufficiently large contours — the bubbles.
        questionCnts = []
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            ar = w / float(h)
            if w >= 20 and h >= 20 and 0.9 <= ar <= 1.1:
                questionCnts.append(c)

        # Rows top-to-bottom; each row of 5 bubbles is one question.
        questionCnts = contours.sort_contours(questionCnts,
            method="top-to-bottom")[0]
        correct = 0
        for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
            cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
            bubbled = None

            # The bubble with the most foreground pixels is the marked one.
            for (j, c) in enumerate(cnts):
                mask = np.zeros(thresh.shape, dtype="uint8")
                cv2.drawContours(mask, [c], -1, 255, -1)
                mask = cv2.bitwise_and(thresh, thresh, mask=mask)
                total = cv2.countNonZero(mask)
                if bubbled is None or total > bubbled[0]:
                    bubbled = (total, j)

            # Outline the correct answer: green if the student got it, red-ish
            # otherwise (channel order follows the image, RGB after load()).
            color = (0, 0, 255)
            k = ANSWER_KEY[q]
            if k == bubbled[1]:
                color = (0, 255, 0)
                correct += 1
            cv2.drawContours(paper, [cnts[k]], -1, color, 3)

        self.show_img(paper)
        score = (correct / 5.0) * 100
        logging.debug("[INFO] score: %.2f%%", score)
if __name__ == "__main__":
    # Make the module's logging.info/debug calls visible on the console.
    logging.basicConfig(level=logging.INFO)
    app = QApplication(sys.argv)
    # Fix: the original instantiated Example(), a name that does not exist
    # in this file — the class defined above is Cv1.
    window = Cv1()
    window.show()
    sys.exit(app.exec())
