from PyQt5 import QtCore, QtWidgets,QtGui
from Ui_main_widget import Ui_MainWidget
import cv2
import numpy as np
import math

def GaussianHighFilter(image, d):
    """Apply a Gaussian high-pass filter to an image in the frequency domain.

    Args:
        image: 2-D grayscale array (H, W) or 3-D color array (H, W, C).
        d: Gaussian cutoff parameter (sigma of the frequency-domain mask);
           larger d removes more low-frequency content.

    Returns:
        Filtered image as a float array of the same shape (magnitude of the
        inverse FFT).
    """
    image = np.asarray(image)
    # Transform only the two spatial axes.  The fft2/fftshift default of
    # (-2, -1) would mix the width and channel axes for (H, W, C) input.
    spatial_axes = (0, 1)
    f = np.fft.fft2(image, axes=spatial_axes)
    fshift = np.fft.fftshift(f, axes=spatial_axes)

    rows, cols = image.shape[0], image.shape[1]
    center_r = (rows - 1) / 2
    center_c = (cols - 1) / 2
    # Squared distance of every pixel from the spectrum center, vectorized
    # via broadcasting instead of a per-pixel Python loop.
    row_delta = np.arange(rows).reshape(-1, 1) - center_r
    col_delta = np.arange(cols).reshape(1, -1) - center_c
    dist_sq = row_delta ** 2 + col_delta ** 2
    # High-pass mask: 1 - Gaussian (attenuates low frequencies near center).
    mask = 1 - np.exp(-dist_sq / (2 * (d ** 2)))
    if image.ndim == 3:
        mask = mask[:, :, np.newaxis]  # broadcast the same mask over channels

    filtered = np.fft.ifftshift(fshift * mask, axes=spatial_axes)
    new_img = np.abs(np.fft.ifft2(filtered, axes=spatial_axes))
    return new_img

def GaussianLowFilter(image, d):
    """Apply a Gaussian low-pass filter to an image in the frequency domain.

    Args:
        image: 2-D grayscale array (H, W) or 3-D color array (H, W, C).
           (The original implementation hard-coded exactly 3 channels and
           crashed on grayscale input.)
        d: Gaussian cutoff parameter (sigma of the frequency-domain mask);
           larger d keeps more high-frequency content.

    Returns:
        Smoothed image as a float array of the same shape (magnitude of the
        inverse FFT).
    """
    image = np.asarray(image)
    # Transform only the two spatial axes.  The fft2/fftshift default of
    # (-2, -1) would mix the width and channel axes for (H, W, C) input.
    spatial_axes = (0, 1)
    f = np.fft.fft2(image, axes=spatial_axes)
    fshift = np.fft.fftshift(f, axes=spatial_axes)

    rows, cols = image.shape[0], image.shape[1]
    center_r = (rows - 1) / 2
    center_c = (cols - 1) / 2
    # Gaussian mask centered on the (shifted) zero-frequency bin, built with
    # broadcasting instead of the transposed-meshgrid construction.
    row_delta = np.arange(rows).reshape(-1, 1) - center_r
    col_delta = np.arange(cols).reshape(1, -1) - center_c
    mask = np.exp(-(row_delta ** 2 + col_delta ** 2) / (2 * (d ** 2)))
    if image.ndim == 3:
        mask = mask[:, :, np.newaxis]  # broadcast the same mask over channels

    filtered = np.fft.ifftshift(fshift * mask, axes=spatial_axes)
    new_img = np.abs(np.fft.ifft2(filtered, axes=spatial_axes))
    return new_img
class MainWidget(QtWidgets.QWidget):
    """Interactive image-processing playground.

    Loads an image through a file dialog and re-runs a configurable
    pipeline (grayscale, illumination correction, top-hat / black-hat,
    ROI crop, histogram equalization, thresholding, open/close
    morphology, distance transform and cross-mark detection) every time
    one of the UI controls changes.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.ui = Ui_MainWidget()
        self.ui.setupUi(self)
        self.setupUi()
        self.src_img = None    # BGR image as loaded by cv2.imread, or None
        self.old_img_w = None  # source image width in pixels
        self.old_img_h = None  # source image height in pixels
        self.roi = None        # (x1, y1, x2, y2) selection rectangle, or None

    def setupUi(self):
        """Connect every control to the pipeline re-run slot."""
        # ROI filtering stays disabled until a ROI has actually been drawn.
        self.ui.checkBox_roi_enable.setEnabled(False)
        self.ui.pushButton_open_file.clicked.connect(self.open_img_file)
        self.ui.checkBox_gray_enable.clicked.connect(self.config_changed)
        self.ui.checkBox_close_enable.clicked.connect(self.config_changed)
        self.ui.checkBox_roi_enable.clicked.connect(self.config_changed)
        self.ui.checkBox_equalizeHist_enable.clicked.connect(self.config_changed)
        self.ui.checkBox_threshold_enable.clicked.connect(self.config_changed)
        self.ui.checkBox_open_enable.clicked.connect(self.config_changed)
        self.ui.checkBox_inv.clicked.connect(self.config_changed)
        self.ui.checkBox_top_hat.clicked.connect(self.config_changed)
        self.ui.checkBox_black_hat.clicked.connect(self.config_changed)
        self.ui.checkBox_light_adjust.clicked.connect(self.config_changed)
        self.ui.horizontalSlider_threshold.valueChanged.connect(self.slider_value_change)
        self.ui.lineEdit_threshold.editingFinished.connect(self.lineEdit_thresholdStopEdit)
        self.ui.pushButton_set_roi.clicked.connect(self.setup_roi)
        self.ui.lineEdit_open_size.editingFinished.connect(self.config_changed)
        self.ui.lineEdit_open_count.editingFinished.connect(self.config_changed)
        self.ui.lineEdit_close_size.editingFinished.connect(self.config_changed)
        self.ui.lineEdit_close_count.editingFinished.connect(self.config_changed)
        self.ui.lineEdit_top_hat_size.editingFinished.connect(self.config_changed)
        self.ui.lineEdit_black_hat_size.editingFinished.connect(self.config_changed)
        self.ui.checkBox_enable_distance_transform.clicked.connect(self.config_changed)
        self.ui.checkBox_Cross_Mark.clicked.connect(self.config_changed)

    def setup_roi(self):
        """Store the rectangle drawn on the image label and enable ROI use."""
        self.roi = self.ui.image_label.getROI()
        self.ui.image_label.clearup()
        self.ui.checkBox_roi_enable.setEnabled(True)
        str_roi = "(%d, %d),(%d, %d)" % (self.roi[0], self.roi[1], self.roi[2], self.roi[3])
        self.ui.label_roi_info.setText(str_roi)

    def lineEdit_thresholdStopEdit(self):
        """Re-run the pipeline when a special threshold code is typed.

        "-1" selects Otsu thresholding and "-2" adaptive thresholding in
        do_img_process; ordinary values are driven by the slider instead.
        """
        txt = self.ui.lineEdit_threshold.text()
        if txt == "-1" or txt == "-2":
            self.config_changed()

    def slider_value_change(self, value):
        """Mirror the slider value into the line edit and re-process."""
        self.ui.lineEdit_threshold.setText("%d" % value)
        self.config_changed()

    def config_changed(self):
        """Any configuration change triggers a full re-process."""
        self.do_img_process()

    def _roi_offset(self):
        """Top-left corner of the active ROI in source-image coordinates.

        Returns (0, 0) when no ROI is set or ROI use is disabled, so that
        coordinates found on the (possibly cropped) working image can
        always be mapped back onto the source image without crashing on a
        missing ROI.
        """
        if self.roi is not None and self.ui.checkBox_roi_enable.isChecked():
            return self.roi[0], self.roi[1]
        return 0, 0

    def do_img_process(self):
        """Run the configured pipeline over src_img and display the result."""
        if self.src_img is None:
            return

        result = self.src_img
        is_gray = False

        if self.ui.checkBox_gray_enable.isChecked():
            result = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
            is_gray = True

        if self.ui.checkBox_light_adjust.isChecked():
            if is_gray:
                result = self.adjust_uneven_illumination_gray(result)
            else:
                result = self.adjust_uneven_illumination(result)

        if self.ui.checkBox_top_hat.isChecked() and is_gray:
            ks = int(self.ui.lineEdit_top_hat_size.text())
            kernel = np.ones((ks, ks), np.uint8)
            # top-hat = src - open(src), so src - top_hat cannot underflow.
            top_hat = cv2.morphologyEx(result, cv2.MORPH_TOPHAT, kernel)
            result = result - top_hat

        if self.ui.checkBox_black_hat.isChecked() and is_gray:
            ks = int(self.ui.lineEdit_black_hat_size.text())
            kernel = np.ones((ks, ks), np.uint8)
            # black-hat = close(src) - src, so src + black_hat cannot overflow.
            black_hat = cv2.morphologyEx(result, cv2.MORPH_BLACKHAT, kernel)
            result = result + black_hat

        old_img = result
        if self.ui.checkBox_roi_enable.isChecked():
            result = old_img[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]].copy()

        if self.ui.checkBox_equalizeHist_enable.isChecked() and is_gray:
            result = cv2.equalizeHist(result)

        if self.ui.checkBox_threshold_enable.isChecked() and is_gray:
            threshold_value = int(self.ui.lineEdit_threshold.text())
            flag = cv2.THRESH_BINARY
            if self.ui.checkBox_inv.isChecked():
                flag = cv2.THRESH_BINARY_INV

            if threshold_value == -1:
                # -1 selects Otsu's automatic global threshold.
                flag = flag | cv2.THRESH_OTSU
                ret, result = cv2.threshold(result, -1, 255, flag)
            elif threshold_value < -1:
                # Values below -1 select adaptive thresholding with block
                # size |value|.  OpenCV requires an odd block size >= 3
                # (the original passed 2 for "-2", which is invalid).
                block_size = max(3, (-threshold_value) | 1)
                result = cv2.adaptiveThreshold(result, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, flag, block_size, 0)
            else:
                ret, result = cv2.threshold(result, threshold_value, 255, flag)

        if self.ui.checkBox_open_enable.isChecked():
            ks = int(self.ui.lineEdit_open_size.text())
            count = int(self.ui.lineEdit_open_count.text())
            kernel = np.ones((ks, ks), np.uint8)
            result = cv2.morphologyEx(result, cv2.MORPH_OPEN, kernel, iterations=count)

        if self.ui.checkBox_close_enable.isChecked():
            ks = int(self.ui.lineEdit_close_size.text())
            count = int(self.ui.lineEdit_close_count.text())
            kernel = np.ones((ks, ks), np.uint8)
            result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel, iterations=count)

        if self.ui.checkBox_enable_distance_transform.isChecked():
            ox, oy = self._roi_offset()
            result = cv2.distanceTransform(result, cv2.DIST_L2, cv2.DIST_MASK_5)
            minvalue, maxvalue, minloc, maxloc = cv2.minMaxLoc(result)
            # Map the deepest-inside-the-blob pixel back to source coords.
            max_loc_in_src_img = (maxloc[0] + ox, maxloc[1] + oy)

            # Inspect a 6x6 neighbourhood around that maximum.
            sub_img = result[maxloc[1] - 3:maxloc[1] + 3, maxloc[0] - 3:maxloc[0] + 3]
            minvalue, maxvalue, minloc, maxloc = cv2.minMaxLoc(sub_img)

            min_loc_in_src_img = (minloc[0] - 3 + max_loc_in_src_img[0],
                                  minloc[1] - 3 + max_loc_in_src_img[1])
            print("minloc = ", minloc)
            print("sub_img = ", sub_img)
            cv2.circle(self.src_img, max_loc_in_src_img, 2, (0, 0, 255), 5)
            cv2.circle(self.src_img, min_loc_in_src_img, 2, (0, 255, 0), 5)

            cv2.imshow("img", self.src_img)

        if self.ui.checkBox_Cross_Mark.isChecked():
            # Skeletonize the binary blob and find its longest straight
            # segment, which gives the mark's main direction.
            edges = cv2.ximgproc.thinning(result, cv2.ximgproc.THINNING_GUOHALL)
            cv2.imshow("edges", edges)
            # minLineLength/maxLineGap must be keywords: passed positionally
            # they land in the unused 'lines' output slot of HoughLinesP.
            lines = cv2.HoughLinesP(edges, 0.01, np.pi / 720, 100,
                                    minLineLength=2, maxLineGap=1)

            max_line_len = -100
            max_line_x1 = 0
            max_line_y1 = 0
            max_line_x2 = 0
            max_line_y2 = 0
            if lines is not None:  # HoughLinesP returns None when nothing found
                for line in lines:
                    x1 = line[0, 0]
                    y1 = line[0, 1]
                    x2 = line[0, 2]
                    y2 = line[0, 3]
                    x_dist = abs(x2 - x1)
                    if x_dist < 5:
                        # Skip near-vertical segments: the slope below would
                        # be unstable (or divide by zero).
                        continue

                    line_length = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
                    if line_length > max_line_len:
                        max_line_len = line_length
                        max_line_x1 = x1
                        max_line_y1 = y1
                        max_line_x2 = x2
                        max_line_y2 = y2

                    k = (y2 - y1) / (x2 - x1)
                    print("k = ", k)

            if max_line_len > 0:
                # Unit direction of the LONGEST segment (the original used
                # the loop-leaked endpoints of the last segment visited).
                e_direct_vector = ((max_line_x2 - max_line_x1) / max_line_len,
                                   (max_line_y2 - max_line_y1) / max_line_len)

                ox, oy = self._roi_offset()
                distance_transform_result = cv2.distanceTransform(result, cv2.DIST_L2, cv2.DIST_MASK_5)
                minvalue, maxvalue, minloc, maxloc = cv2.minMaxLoc(distance_transform_result)
                max_loc_in_src_img = (maxloc[0] + ox, maxloc[1] + oy)
                cv2.circle(self.src_img, max_loc_in_src_img, 2, (0, 0, 255), 3)

                # Draw a 100-px arrow from the blob center along the mark
                # direction (+0.5 rounds to the nearest integer pixel).
                next_point = (int(e_direct_vector[0] * 100 + max_loc_in_src_img[0] + 0.5),
                              int(e_direct_vector[1] * 100 + max_loc_in_src_img[1] + 0.5))
                cv2.line(self.src_img, (max_loc_in_src_img[0], max_loc_in_src_img[1]),
                         (next_point[0], next_point[1]), (0, 255, 0), 1)

            cv2.imshow("thing", self.src_img)

        self.ui.image_label.setImage(result, is_gray)

    def adjust_uneven_illumination(self, src_img):
        """Correct uneven illumination on a BGR image via adaptive gamma.

        Estimates the illumination as the average of three Gaussian
        low-pass filtered copies, then applies a per-pixel gamma of
        0.5 ** k, which brightens under-lit areas and darkens over-lit
        ones.
        """
        g1 = GaussianLowFilter(src_img, 0.1)
        g2 = GaussianLowFilter(src_img, 0.3)
        g3 = GaussianLowFilter(src_img, 0.5)

        # Multi-scale illumination estimate.
        g = (g1 / 3 + g2 / 3 + g3 / 3)

        m = np.mean(g)
        # k > 0 where the image is darker than average, < 0 where brighter.
        k = (-g + m) / m
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        v = np.full(k.shape, 0.5, dtype=float)
        gamma = np.power(v, k)
        F = src_img / 255.0
        result = (255.0 * np.power(F, gamma)).astype(np.uint8)
        # Debug previews; waitKey blocks until a key is pressed.
        cv2.imshow("old", src_img)
        cv2.imshow("g", g.astype(np.uint8))
        cv2.imshow("result", result)
        cv2.waitKey()
        return result

    def adjust_uneven_illumination_gray(self, src_img):
        """Correct uneven illumination on a grayscale image via adaptive gamma.

        The illumination is estimated by repeated Gaussian blurs at three
        increasing sigmas, averaged, then used to drive a per-pixel gamma
        correction of 0.5 ** k.
        """
        ksize = 5
        q = math.sqrt(2)
        sigma1 = 15
        sigma2 = 80
        sigma3 = 250
        img = src_img

        for i in range(3):
            img = cv2.GaussianBlur(img, (ksize, ksize), sigma1 / q)
        g1 = img.copy()

        for i in range(5):
            img = cv2.GaussianBlur(img, (ksize, ksize), sigma2 / q)
        g2 = img.copy()

        for i in range(10):
            img = cv2.GaussianBlur(img, (ksize, ksize), sigma3 / q)
        g3 = img.copy()

        # Multi-scale illumination estimate.
        g = (g1 / 3 + g2 / 3 + g3 / 3).astype(np.uint8)

        m = np.mean(g)
        # k > 0 where the image is darker than average, < 0 where brighter.
        k = (-g + m) / m
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        v = np.full(k.shape, 0.5, dtype=float)
        gamma = np.power(v, k)
        F = src_img / 255.0
        result = (255.0 * np.power(F, gamma)).astype(np.uint8)
        return result

    def open_img_file(self):
        """Pick an image via a file dialog, load it and run the pipeline.

        Raises:
            RuntimeError: if the selected file cannot be decoded as an image.
        """
        filename, filetype = QtWidgets.QFileDialog.getOpenFileName(
            self,
            "打开图片",
            "F:/new_code/comments_cpp_wrapper/ImageProcessor_Test/test_images",
            "image File(*.*)"
        )
        if filename == '':
            return

        self.src_img = cv2.imread(filename, 1)

        if self.src_img is None:
            raise RuntimeError("open file error: %s" % filename)

        self.setWindowTitle(filename)
        # numpy shape is (rows, cols, channels): shape[1] is the width and
        # shape[0] the height (the original had them swapped).
        self.old_img_w = self.src_img.shape[1]
        self.old_img_h = self.src_img.shape[0]
        self.do_img_process()