#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:g-y-b time:2020/5/25
from PyQt5.QtCore import QTimer, QCoreApplication
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog
from four import Ui_Form
import sys
import cv2
import numpy as np
import qimage2ndarray
import win32ui
import os
import matplotlib.pyplot as plt
import sift测试1 as sift
import warnings
from sklearn.neighbors import KNeighborsClassifier
from PIL import Image


class four_ui(QMainWindow, Ui_Form):
    """Main window of the SIFT demo.

    The user picks two images, then matches them with either OpenCV's SIFT
    (``mySIFT``) or the hand-written implementation imported from the
    ``sift测试1`` module (``mySIFT2``). The matched result is rendered on
    ``label_img3``.
    """

    def __init__(self, parent=None):
        super(four_ui, self).__init__(parent)
        self.setupUi(self)
        self.CallBackFunctions()  # wire widget signals to their handlers

        # The two chosen images as RGB ndarrays; None until the user picks them.
        self.myImg = None
        self.myImg2 = None

    def CallBackFunctions(self):
        """Connect every button on the form to its handler method."""
        self.pushButton_ChooseImg.clicked.connect(self.ChooseImg)
        self.pushButton_ChooseImg_2.clicked.connect(self.ChooseImg2)
        self.pushButton_SIFT.clicked.connect(self.mySIFT)
        self.pushButton_SIFT_2.clicked.connect(self.mySIFT2)

    def _load_and_show(self, label):
        """Let the user pick an image file and preview it on *label*.

        Returns the image as an RGB ndarray, or None when the dialog was
        cancelled or the file could not be read. Shared by ChooseImg and
        ChooseImg2, which previously duplicated this code.
        """
        dlg = win32ui.CreateFileDialog(1)  # 1 = "Open file" dialog
        dlg.DoModal()

        filename = dlg.GetPathName()
        print(filename)
        if filename == '':
            print("读取文件失败！")
            return None

        img = cv2.imread(filename)
        if img is None:  # cv2.imread returns None for unreadable/invalid files
            print("读取文件失败！")
            return None

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; Qt expects RGB

        qimg = qimage2ndarray.array2qimage(img)  # ndarray -> QImage
        # Scale the preview to fill the label.
        scaled = qimg.scaled(label.width(), label.height())
        label.setPixmap(QPixmap.fromImage(scaled))
        label.show()
        return img

    def ChooseImg(self):
        """Choose the first image and preview it on label_img."""
        img = self._load_and_show(self.label_img)
        if img is not None:
            self.myImg = img

    def ChooseImg2(self):
        """Choose the second image and preview it on label_img2."""
        img = self._load_and_show(self.label_img2)
        if img is not None:
            self.myImg2 = img

    def mySIFT(self):
        """Match the two images with OpenCV SIFT and show the 50 best matches."""
        # Guard: both images must have been chosen, otherwise cvtColor crashes.
        if self.myImg is None or self.myImg2 is None:
            print("Please choose both images first!")
            return

        img1 = cv2.cvtColor(self.myImg, cv2.COLOR_RGB2GRAY)
        img2 = cv2.cvtColor(self.myImg2, cv2.COLOR_RGB2GRAY)

        # Detect keypoints and compute descriptors. The local name 'detector'
        # avoids shadowing the imported 'sift' module.
        detector = cv2.xfeatures2d.SIFT_create()
        keypoints_1, descriptors_1 = detector.detectAndCompute(img1, None)
        keypoints_2, descriptors_2 = detector.detectAndCompute(img2, None)

        # Brute-force matching with cross-check; best (smallest distance) first.
        bf = cv2.BFMatcher(cv2.NORM_L1, crossCheck=True)
        matches = sorted(bf.match(descriptors_1, descriptors_2),
                         key=lambda m: m.distance)

        # flags=2 (NOT_DRAW_SINGLE_POINTS): draw only matched keypoints.
        img3 = cv2.drawMatches(self.myImg, keypoints_1, self.myImg2, keypoints_2,
                               matches[:50], None, flags=2)

        qimg = qimage2ndarray.array2qimage(img3)  # ndarray -> QImage
        result = qimg.scaled(self.label_img3.width(), self.label_img3.height())
        self.label_img3.setPixmap(QPixmap.fromImage(result))
        self.label_img3.show()

    def mySIFT2(self):
        """Match the two images with the custom SIFT implementation and draw
        the match lines over the side-by-side pair."""
        # Guard: both images must have been chosen.
        if self.myImg is None or self.myImg2 is None:
            print("Please choose both images first!")
            return

        origimg = self.myImg
        # Grayscale via channel mean when the image is RGB.
        img = origimg.mean(axis=-1) if origimg.ndim == 3 else origimg
        keyPoints, descriptors_1 = sift.SIFT(img, True)

        origimg2 = self.myImg2
        # BUG FIX: the original tested origimg.shape here, so the second image
        # was converted (or not) based on the FIRST image's shape.
        img2 = origimg2.mean(axis=-1) if origimg2.ndim == 3 else origimg2

        # Rescale image 2 so both images have the same height before stacking.
        ScaleRatio = img.shape[0] * 1.0 / img2.shape[0]
        img2 = np.array(
            Image.fromarray(img2).resize(
                (int(round(ScaleRatio * img2.shape[1])), img.shape[0]), Image.BICUBIC))
        keyPoints2, descriptors_2 = sift.SIFT(img2)

        # 1-NN matching: for each descriptor of image 2, find the closest
        # descriptor of image 1 (dummy labels — only kneighbors() is used).
        knn = KNeighborsClassifier(n_neighbors=1)
        knn.fit(descriptors_1, [0] * len(descriptors_1))
        match = knn.kneighbors(descriptors_2, n_neighbors=1, return_distance=True)

        # Keep (row, col) coordinates only; shift image-2 columns right by the
        # width of image 1 so they index into the stacked canvas.
        keyPoints = np.array(keyPoints)[:, :2]
        keyPoints2 = np.array(keyPoints2)[:, :2]
        keyPoints2[:, 1] = img.shape[1] + keyPoints2[:, 1]

        # Resize the color original to match the rescaled grayscale, then stack.
        origimg2 = np.array(
            Image.fromarray(origimg2).resize((img2.shape[1], img2.shape[0]), Image.BICUBIC))
        result = np.hstack((origimg, origimg2))

        # Reorder image-1 keypoints so row i pairs with keyPoints2[i].
        keyPoints = keyPoints[match[1][:, 0]]

        X1 = keyPoints[:, 1]
        X2 = keyPoints2[:, 1]
        Y1 = keyPoints[:, 0]
        Y2 = keyPoints2[:, 0]

        img = sift.drawLines(X1, X2, Y1, Y2, match[0][:, 0], result)
        qimg = qimage2ndarray.array2qimage(img.astype(np.uint8))  # ndarray -> QImage
        result = qimg.scaled(self.label_img3.width(), self.label_img3.height())
        self.label_img3.setPixmap(QPixmap.fromImage(result))
        self.label_img3.show()


if __name__ == '__main__':
    # Launch the Qt application and enter its event loop; exec_() blocks
    # until the window is closed and its return code becomes the exit status.
    application = QApplication(sys.argv)
    window = four_ui()
    window.show()
    sys.exit(application.exec_())
