import numpy as np
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QIcon, QColor
from PyQt5.QtWidgets import QListWidgetItem, QPushButton
from PyQt5 import QtWidgets
from flags import *
import imutils
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import cv2
import logging
# Answer key used by ScoreItem: question index -> correct choice index (0-4).
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
import numpy as np
import cv2
from collections import OrderedDict
import math

def addItems(items,item):
    """Increment the count of a color in the histogram dict `items`.

    `item` is any 3-component sequence (e.g. a BGR pixel row); it is
    normalized to a hashable 3-tuple key. Returns `items` (mutated in place).
    """
    key = (item[0], item[1], item[2])
    # dict.get avoids the double lookup of `key in items.keys()` + `items[key]`.
    items[key] = items.get(key, 0) + 1
    return items
def sort_value(old_dict, reverse=False):
    """Return a new OrderedDict with old_dict's items ordered by value.

    Ascending by default; `old_dict` itself is not modified.
    """
    ranked = sorted(old_dict.items(), key=lambda kv: kv[1], reverse=reverse)
    return OrderedDict(ranked)
#除背景
def background(img,num,jl_bijiao):
    """Blank out the dominant background color of `img`.

    Counts every pixel color, averages the `num` most frequent colors, and
    sets to black every pixel whose Euclidean distance to that average is
    below `jl_bijiao`. Writes the result to "out.jpg" and returns it.

    The original per-pixel Python loops (O(H*W) twice) are replaced with
    vectorized NumPy operations; counting still scans in row-major order so
    tie-breaking of equally-frequent colors is unchanged.
    """
    raw_img = img.copy()
    # Flatten to a (H*W, channels) pixel list in the same i-then-j order
    # the original nested loops used.
    pixels = raw_img.reshape(-1, raw_img.shape[-1])
    counts = {}
    for p in map(tuple, pixels):
        counts[p] = counts.get(p, 0) + 1
    logging.info([num, jl_bijiao])
    # Stable ascending sort by count; the tail holds the `num` most frequent.
    ranked = sorted(counts.items(), key=lambda kv: kv[1])
    max_keys = [color for color, _ in ranked[-num:]]
    # Mean of the dominant colors.
    max_key = np.asarray(max_keys, dtype=float).mean(axis=0)
    # Per-pixel Euclidean distance to the mean color; close pixels -> black.
    dist = np.sqrt(((raw_img.astype(float) - max_key) ** 2).sum(axis=-1))
    raw_img[dist < jl_bijiao] = 0
    cv2.imwrite("out.jpg", raw_img)
    return raw_img
#计算颜色平均值
def getAverage(l):
    """Return the component-wise mean of a list of 3-component colors.

    Result is a 3-tuple of floats. Raises ZeroDivisionError on an empty list
    (same as the original behavior).
    """
    n = float(len(l))
    totals = [0.0, 0.0, 0.0]
    for color in l:
        totals[0] += color[0]
        totals[1] += color[1]
        totals[2] += color[2]
    return (totals[0] / n, totals[1] / n, totals[2] / n)
#计算两个颜色之间距离
def jl(p1,p2):
    """Euclidean distance between two 3-component colors."""
    total = 0.0
    for k in range(3):
        d = float(p1[k]) - float(p2[k])
        total += d * d
    return math.sqrt(total)
def cnt_left_top(cnt):
    """Sort key for contours: the top-left corner's y dominates, with x/30
    as a tie-breaker, so contours sort roughly top-to-bottom then
    left-to-right. (The previous docstring wrongly said "contour area".)
    """
    # left_top() yields the contour's minimal-corner point [x, y].
    the=left_top(cnt)
    return the[1] +the[0]/30.0
def left_top(cnt):
    """Return [min_x, min_y] over an OpenCV contour (points shaped (N,1,2)).

    Fixes two defects in the previous version:
    - the `elif` skipped the x comparison whenever y was updated, so the
      minimum x could be missed depending on point order;
    - `the` aliased cnt[0][0], so updating it rewrote the contour's first
      point in place, corrupting the caller's data.
    """
    xs = [p[0][0] for p in cnt]
    ys = [p[0][1] for p in cnt]
    return [min(xs), min(ys)]
class MyItem(QListWidgetItem):
    """Base list entry for one image-processing operation.

    Parameters are stored as single-underscore attributes (`_ksize`, ...);
    get_params/update_params expose them without the underscore prefix.
    """

    def __init__(self, name=None, parent=None):
        super().__init__(name, parent)
        self.setIcon(QIcon('icons/color.png'))
        self.setSizeHint(QSize(60, 60))  # size

    def get_params(self):
        """Return {'name': value} for every `_name` (non-dunder) attribute."""
        return {
            attr.replace('_', '', 1): getattr(self, attr)
            for attr in dir(self)
            if attr.startswith('_') and not attr.startswith('__')
        }

    def update_params(self, param):
        """Write back values from get_params()-style dict onto `_name` attrs."""
        logging.info(param)
        for key, value in param.items():
            name = '_' + key
            if name in dir(self):
                setattr(self, name, value)


class GrayingItem(MyItem):
    """Convert the working image to single-channel grayscale."""
    def __init__(self, parent=None):
        super().__init__('gray', parent=parent)
        # Not read by __call__; presumably exposed via the parameter panel.
        self._mode = BGR2GRAY_COLOR
 
    def __call__(self, img):
        # NOTE(review): uses COLOR_RGB2GRAY although other items in this file
        # (e.g. ScoreItem) treat the image as BGR — confirm the intended
        # channel order; the flag name BGR2GRAY_COLOR above suggests BGR.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        # img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        return img
class Score2Item(MyItem):
    """Grade an answer sheet that is already cropped flat.

    Thresholds the (grayscale) image, finds roughly-square bubble contours,
    sorts them top-to-bottom/left-to-right via cnt_left_top, and for each
    group of 5 bubbles outlines the one with the most filled pixels.
    Returns the annotated image.
    """

    def __init__(self, parent=None):
        super().__init__('Score2', parent=parent)

    def __call__(self, img):
        # Binarize; Otsu picks the threshold automatically.
        thresh = cv2.threshold(img, 0, 255,
            cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        # Outer contours of the thresholded image.
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        questionCnts = []
        logging.info(len(cnts))
        for c in cnts:
            # Keep only sufficiently large, roughly square regions (bubbles).
            (x, y, w, h) = cv2.boundingRect(c)
            ar = w / float(h)
            # Bug fix: logging.info("w,h,ar", w, h, ar) passed args with a
            # placeholder-free format string, raising internal logging errors.
            logging.info("w=%s h=%s ar=%s", w, h, ar)
            if w >= 30 and h >= 30 and ar >= 0.8 and ar <= 1.2:
                questionCnts.append(c)

        # Bug fix: logging.info("len", ...) had the same broken-format problem.
        logging.info("len=%s", len(questionCnts))

        # Order bubbles top-to-bottom then left-to-right and label them.
        questionCnts.sort(key=cnt_left_top, reverse=False)
        cv2.drawContours(img, questionCnts, -1, (255, 0, 0), thickness=2)
        for i in range(len(questionCnts)):
            point = left_top(questionCnts[i])
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(i), point, font, 0.3, (0, 255, 255), 1)

        correct = 0
        # Each question is a group of 5 consecutive bubbles.
        for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
            logging.info("q:%s i:%s", q, i)
            cnts = questionCnts[i:i + 5]
            bubbled = None
            hd = []
            for (j, c) in enumerate(cnts):
                # Mask that reveals only the current bubble.
                mask = np.zeros(thresh.shape, dtype="uint8")
                cv2.drawContours(mask, [c], -1, 255, -1)
                # Count this bubble's foreground pixels; fullest bubble wins.
                mask = cv2.bitwise_and(thresh, thresh, mask=mask)
                total = cv2.countNonZero(mask)
                hd.append(total)
                if bubbled is None or total > bubbled[0]:
                    bubbled = (total, j)
            color = (0, 0, 255)
            # Bug fix: logging.info(q, bubbled[1]) used an int as the format
            # string, raising an internal logging error on every question.
            logging.info("q=%s bubbled=%s", q, bubbled[1])
            # Outline the selected (most-filled) bubble.
            cv2.drawContours(img, [cnts[bubbled[1]]], -1, color, 3)
        logging.info(img.shape[0])
        return img
class ScoreItem(MyItem):
    """Full bubble-sheet grader (classic OMR pipeline).

    Locates the sheet's 4-corner outline, warps it to a top-down view,
    finds the answer bubbles, compares each question's filled bubble
    against ANSWER_KEY, and draws the correct answer on the warped sheet
    (green when the student's choice matches, red otherwise).
    """

    def __init__(self, parent=None):
        super().__init__('Score', parent=parent)

    def __call__(self, img):
        # Edge map used for document detection.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(blurred, 75, 200)

        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        docCnt = None

        if len(cnts) > 0:
            # Largest contours first: the sheet outline should dominate.
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
            for c in cnts:
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                # A 4-point approximation is assumed to be the paper.
                if len(approx) == 4:
                    docCnt = approx
                    break
        # Robustness fix: previously docCnt stayed None when no 4-point
        # contour existed and docCnt.reshape raised AttributeError.
        if docCnt is None:
            logging.info("no 4-point document contour found")
            return img

        # Top-down views: color for drawing, grayscale for analysis.
        paper = four_point_transform(img, docCnt.reshape(4, 2))
        warped = four_point_transform(gray, docCnt.reshape(4, 2))

        thresh = cv2.threshold(warped, 0, 255,
            cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)

        # Keep only sufficiently large, roughly square contours: the bubbles.
        questionCnts = []
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            ar = w / float(h)
            if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
                questionCnts.append(c)

        questionCnts = contours.sort_contours(questionCnts,
            method="top-to-bottom")[0]
        correct = 0
        # Each question is a row of 5 bubbles.
        for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
            # Left-to-right within the row.
            cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
            bubbled = None
            for (j, c) in enumerate(cnts):
                # Mask off everything except the current bubble.
                mask = np.zeros(thresh.shape, dtype="uint8")
                cv2.drawContours(mask, [c], -1, 255, -1)
                # Count foreground pixels; fullest bubble is the answer.
                mask = cv2.bitwise_and(thresh, thresh, mask=mask)
                total = cv2.countNonZero(mask)
                if bubbled is None or total > bubbled[0]:
                    bubbled = (total, j)  # the painted one
            color = (0, 0, 255)
            k = ANSWER_KEY[q]
            if k == bubbled[1]:
                color = (0, 255, 0)
                correct += 1
            # Outline the *correct* choice in the chosen color.
            cv2.drawContours(paper, [cnts[k]], -1, color, 3)
        score = (correct / 5.0) * 100
        logging.debug("[INFO] score: {:.2f}%".format(score))
        return paper
class RotateItem(MyItem):
    """Rotate the image by 90/180/270 degrees depending on `_kind`."""

    def __init__(self, parent=None):
        super().__init__('rotate', parent=parent)
        self._kind = cv2.ROTATE_90_CLOCKWISE

    def __call__(self, img):
        # 0 -> 90 deg clockwise, 1 -> 90 deg counter-clockwise, else -> 180 deg.
        if self._kind == 0:
            code = cv2.ROTATE_90_CLOCKWISE
        elif self._kind == 1:
            code = cv2.ROTATE_90_COUNTERCLOCKWISE
        else:
            code = cv2.ROTATE_180
        return cv2.rotate(img, code)

class FilterItem(MyItem):
    """Blur the image with a mean, Gaussian, or median filter."""

    def __init__(self, parent=None):
        super().__init__('blur', parent=parent)
        self._ksize = 3
        self._kind = MEAN_FILTER
        self._sigmax = 0

    def __call__(self, img):
        k = self._ksize
        if self._kind == MEAN_FILTER:
            return cv2.blur(img, (k, k))
        if self._kind == GAUSSIAN_FILTER:
            return cv2.GaussianBlur(img, (k, k), self._sigmax)
        if self._kind == MEDIAN_FILTER:
            return cv2.medianBlur(img, k)
        # Unknown kind: pass the image through unchanged.
        return img


class MorphItem(MyItem):
    """Apply a morphological operation with a structuring element of
    configurable shape and size."""

    def __init__(self, parent=None):
        super().__init__('morphologyEx', parent=parent)
        self._ksize = 3
        self._op = ERODE_MORPH_OP
        self._kshape = RECT_MORPH_SHAPE

    def __call__(self, img):
        # Map the UI flag values to the real cv2 constants.
        op = MORPH_OP[self._op]
        kshape = MORPH_SHAPE[self._kshape]
        kernel = cv2.getStructuringElement(kshape, (self._ksize, self._ksize))
        # Bug fix: morphologyEx was called with the raw UI flag self._op
        # instead of the mapped cv2 constant `op` (which was computed but
        # never used).
        img = cv2.morphologyEx(img, op, kernel)
        return img


class GradItem(MyItem):
    """Image gradient via Sobel, Scharr, or Laplacian.

    Marks itself invalid (red background) when dx == dy == 0 for the
    derivative-based kinds.
    """

    def __init__(self, parent=None):
        super().__init__('Sobel', parent=parent)
        self._kind = SOBEL_GRAD
        self._ksize = 3
        self._dx = 1
        self._dy = 0

    def __call__(self, img):
        if self._dx == 0 and self._dy == 0 and self._kind != LAPLACIAN_GRAD:
            # Invalid configuration: flag it in the list UI, leave img as-is.
            self.setBackground(QColor(255, 0, 0))
            self.setText('图像梯度 （无效: dx与dy不同时为0）')
        else:
            self.setBackground(QColor(200, 200, 200))
            self.setText('图像梯度')
            if self._kind == SOBEL_GRAD:
                # Bug fix: ksize was passed positionally, landing in the
                # `dst` parameter slot of cv2.Sobel; it must be keyword.
                img = cv2.Sobel(img, -1, self._dx, self._dy, ksize=self._ksize)
            elif self._kind == SCHARR_GRAD:
                img = cv2.Scharr(img, -1, self._dx, self._dy)
            elif self._kind == LAPLACIAN_GRAD:
                img = cv2.Laplacian(img, -1)
        return img


class ThresholdItem(MyItem):
    """Fixed-level thresholding with a configurable method."""

    def __init__(self, parent=None):
        super().__init__('threshold', parent=parent)
        self._thresh = 127
        self._maxval = 255
        self._method = BINARY_THRESH_METHOD

    def __call__(self, img):
        method = THRESH_METHOD[self._method]
        # Bug fix: the maxval argument was self._thresh, so pixels above the
        # threshold were set to 127 instead of 255; self._maxval was unused.
        img = cv2.threshold(img, self._thresh, self._maxval, method)[1]
        return img


class EdgeItem(MyItem):
    """Canny edge detection with two configurable hysteresis thresholds."""

    def __init__(self, parent=None):
        super(EdgeItem, self).__init__('Canny', parent=parent)
        self._thresh1 = 20
        self._thresh2 = 100

    def __call__(self, img):
        return cv2.Canny(img, threshold1=self._thresh1, threshold2=self._thresh2)


class ContourItem(MyItem):
    """Find contours and draw them as raw outlines, bounding rectangles,
    min-area rectangles, or minimum enclosing circles, per `_bbox`."""

    def __init__(self, parent=None):
        super(ContourItem, self).__init__('findContours', parent=parent)
        self._mode = TREE_CONTOUR_MODE
        self._method = SIMPLE_CONTOUR_METHOD
        self._bbox = NORMAL_CONTOUR

    def __call__(self, img):
        mode = CONTOUR_MODE[self._mode]
        method = CONTOUR_METHOD[self._method]
        cnts, _ = cv2.findContours(img, mode, method)
        # Publish the contour list app-wide for other tools; only the
        # RECT branch re-populates it (matching the original behavior).
        QtWidgets.qApp.cnts = []
        if self._bbox == RECT_CONTOUR:
            for cnt in cnts:
                x, y, w, h = cv2.boundingRect(cnt)
                QtWidgets.qApp.cnts.append(cnt)
                img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), thickness=1)
        elif self._bbox == MINRECT_CONTOUR:
            boxes = [np.int0(cv2.boxPoints(cv2.minAreaRect(cnt))) for cnt in cnts]
            img = cv2.drawContours(img, boxes, -1, (255, 0, 0), thickness=1)
        elif self._bbox == MINCIRCLE_CONTOUR:
            for (cx, cy), radius in [cv2.minEnclosingCircle(cnt) for cnt in cnts]:
                img = cv2.circle(img, (int(cx), int(cy)), int(radius), (255, 0, 0), thickness=1)
        elif self._bbox == NORMAL_CONTOUR:
            img = cv2.drawContours(img, cnts, -1, (255, 0, 0), thickness=1)

        return img


class EqualizeItem(MyItem):
    """Histogram-equalize any subset of the B/G/R channels."""

    def __init__(self, parent=None):
        super().__init__('equalizeHist', parent=parent)
        self._blue = True
        self._green = True
        self._red = True

    def __call__(self, img):
        channels = cv2.split(img)
        if len(channels) != 3:
            # Not a 3-channel image: pass through unchanged.
            return img
        enabled = (self._blue, self._green, self._red)
        equalized = [cv2.equalizeHist(ch) if on else ch
                     for ch, on in zip(channels, enabled)]
        return cv2.merge(tuple(equalized))


class HoughLineItem(MyItem):
    """Detect line segments with the probabilistic Hough transform and
    draw them in green on the (binary) input image."""

    def __init__(self, parent=None):
        super(HoughLineItem, self).__init__('HoughLinesP', parent=parent)
        self._rho = 1
        self._theta = np.pi / 180
        self._thresh = 10
        self._min_length = 20
        self._max_gap = 5

    def __call__(self, img):
        lines = cv2.HoughLinesP(img, self._rho, self._theta, self._thresh,
                                minLineLength=self._min_length,
                                maxLineGap=self._max_gap)
        if lines is None:
            # No segments found: return the image untouched.
            return img
        for segment in lines:
            for x1, y1, x2, y2 in segment:
                img = cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
        return img


class LightItem(MyItem):
    """Adjust brightness/contrast: out = img * alpha + beta (blended with a
    zero image via cv2.addWeighted)."""

    def __init__(self, parent=None):
        super(LightItem, self).__init__('addWeighted', parent=parent)
        self._alpha = 1
        self._beta = 0

    def __call__(self, img):
        zeros = np.zeros_like(img)
        return cv2.addWeighted(img, self._alpha, zeros, 1 - self._alpha, self._beta)


class GammaItem(MyItem):
    """Gamma correction applied through a 256-entry lookup table."""

    def __init__(self, parent=None):
        super(GammaItem, self).__init__('LUT', parent=parent)
        self._gamma = 1

    def __call__(self, img):
        # Build the LUT with one vectorized expression (was a Python list
        # comprehension); also removes a stray logging.info("here") debug
        # leftover.
        table = np.power(np.arange(256) / 255.0, self._gamma) * 255.0
        table = np.round(table).astype(np.uint8)
        return cv2.LUT(img, table)
class DialationItem(MyItem):
    """Dilate with a fixed 5x5 all-ones kernel, one iteration."""

    def __init__(self, parent=None):
        super().__init__('Dialate', parent=parent)

    def __call__(self, img):
        structuring = np.ones((5, 5), np.uint8)
        return cv2.dilate(img, structuring, iterations=1)

class ErodeItem(MyItem):
    """Erode with a fixed 5x5 all-ones kernel, one iteration."""

    def __init__(self, parent=None):
        super().__init__('Erode', parent=parent)

    def __call__(self, img):
        structuring = np.ones((5, 5), np.uint8)
        return cv2.erode(img, structuring, iterations=1)
class RepairItem(MyItem):
    """Inpaint damaged (pure-black) regions of a color image."""

    def __init__(self, parent=None):
        super().__init__('Repair', parent=parent)

    def __call__(self, img):
        # Inpainting needs the 3-channel original here; skip grayscale input.
        if len(img.shape) == 2:
            logging.info("cannot repair gray")
            return img
        # Mask is 255 where the pixel is pure black (the damage), 0 elsewhere.
        # Replaces the original O(H*W) Python double loop plus a BGR->GRAY
        # round trip with one vectorized expression; result is identical.
        mask = np.where(img.sum(axis=2) == 0, 255, 0).astype(np.uint8)
        # Navier-Stokes-based inpainting with a 3px neighborhood radius.
        return cv2.inpaint(img, mask, 3, cv2.INPAINT_NS)
class FourItem(MyItem):
    """Find the sheet's 4-corner outline and warp it to a top-down view."""

    def __init__(self, parent=None):
        super().__init__('Four', parent=parent)

    def __call__(self, img):
        # Edge map for document detection.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        edged = cv2.Canny(blurred, 75, 200)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        docCnt = None

        if len(cnts) > 0:
            # Largest contours first: the sheet outline should dominate.
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
            for c in cnts:
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                # A 4-point approximation is assumed to be the paper.
                if len(approx) == 4:
                    docCnt = approx
                    break
        # Robustness fix: previously docCnt.reshape raised AttributeError
        # whenever no 4-point contour was found.
        if docCnt is None:
            logging.info("no 4-point document contour found")
            return img
        paper = four_point_transform(img, docCnt.reshape(4, 2))
        return paper
class RmColorBg(MyItem):
    """Remove the dominant background color (delegates to background())."""

    def __init__(self, parent=None):
        super().__init__('RmColorBg', parent=parent)
        self._num = 14  # how many of the most frequent colors to average
        self._jl = 70   # Euclidean color-distance threshold for erasing

    def __call__(self, img):
        logging.info([self._num, self._jl])
        # Bug fix: the clamp was `< 0`, which let num == 0 through;
        # background() then sliced list[-0:] and averaged EVERY color
        # instead of the most frequent ones. Clamp to a minimum of 1.
        if self._num < 1:
            self._num = 1
        return background(img, int(self._num), self._jl)
