import cv2
import time
import numpy as np
from typing import Tuple, Any


# 1. Load the three input images.
def LoadImage(dir1: str, dir2: str, dir3: str) -> Tuple[Any, Any, Any]:
	"""
	Read three images from disk with OpenCV.

	:param dir1: path of image 1
	:param dir2: path of image 2
	:param dir3: path of image 3
	:return: the three image matrices (BGR ndarrays). Any image that
	         failed to load is None, mirroring cv2.imread's behavior.
	"""
	paths = (dir1, dir2, dir3)
	images = tuple(cv2.imread(p) for p in paths)

	# cv2.imread silently returns None for a missing/unreadable file;
	# warn here so downstream SIFT/SURF failures are easy to diagnose.
	for path, img in zip(paths, images):
		if img is None:
			print("Warning: failed to load image:", path)

	return images

# 2.1 Feature matching with SIFT.
def SiftMatch(imageISift: np.ndarray, imageIISift: np.ndarray):
	"""
	Detect SIFT keypoints in both images and match their descriptors.

	:param imageISift: first image
	:param imageIISift: second image
	:return: (keypoints of image 1, keypoints of image 2, knn matches
	         with k=2, suitable for Lowe's ratio test)
	"""
	# SIFT moved into the main cv2 namespace in OpenCV 4.4 (patent
	# expired); older builds only expose it via the xfeatures2d contrib
	# module. Support both so the script runs on either version.
	if hasattr(cv2, "SIFT_create"):
		sift = cv2.SIFT_create()
	else:
		sift = cv2.xfeatures2d.SIFT_create()
	kp1, des1 = sift.detectAndCompute(imageISift, None)
	kp2, des2 = sift.detectAndCompute(imageIISift, None)

	# FLANN matching with a KD-tree index; k=2 returns the two nearest
	# neighbours per descriptor so the caller can apply the ratio test.
	FLANN_INDEX_KDTREE = 1
	index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
	search_params = dict(checks=50)
	flann = cv2.FlannBasedMatcher(index_params, search_params)
	matches = flann.knnMatch(des1, des2, k=2)

	return kp1, kp2, matches

# 2.2 Feature matching with SURF.
def SurfMatch(imageISurf: np.ndarray, imageIISurf: np.ndarray):
	"""
	Detect SURF keypoints in both images and match their descriptors.

	:param imageISurf: first image
	:param imageIISurf: second image
	:return: (keypoints of image 1, keypoints of image 2, knn matches
	         with k=2, suitable for Lowe's ratio test)
	"""
	# Compute SURF features. SURF is patented and only available in the
	# opencv-contrib xfeatures2d module (and only when the build enables
	# non-free algorithms).
	surf = cv2.xfeatures2d.SURF_create()
	kp1, des1 = surf.detectAndCompute(imageISurf, None)
	kp2, des2 = surf.detectAndCompute(imageIISurf, None)

	# FLANN matching with a KD-tree index; k=2 returns the two nearest
	# neighbours per descriptor so the caller can apply the ratio test.
	FLANN_INDEX_KDTREE = 1
	index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
	search_params = dict(checks=50)
	flann = cv2.FlannBasedMatcher(index_params, search_params)
	matches = flann.knnMatch(des1, des2, k=2)

	return kp1, kp2, matches

# 3. Remove mismatched pairs (Lowe's ratio test + RANSAC), with a
#    visualization of the surviving matches.
# 4. Estimate the transform (projective/homography model), register
#    image 2 onto image 1 via backward mapping (cv2.warpPerspective with
#    WARP_INVERSE_MAP) and stitch the pair.
def GetGoodMatch(OriginImage, RegistrationImage, kp1, kp2, matches):
	"""
	Filter matches, estimate a homography, register and stitch.

	:param OriginImage: first image (reference frame)
	:param RegistrationImage: second image, to be registered onto the first
	:param kp1: keypoints of the first image
	:param kp2: keypoints of the second image
	:param matches: knn match result (k=2) between the two images
	:return: (first image, registered second image, match visualization,
	         stitched image, homography matrix), or None when too few
	         good matches survive or the homography cannot be estimated.
	"""
	# Lowe's ratio test: keep a match only when the best neighbour is
	# clearly closer than the second best. 0.5 is stricter than the
	# customary 0.7, trading recall for precision.
	matchesMask = [[0, 0] for _ in range(len(matches))]
	good_match = []
	for i, (m, n) in enumerate(matches):
		if m.distance < 0.5 * n.distance:
			good_match.append(m)
			matchesMask[i] = [1, 0]

	# Draw all knn matches; matchesMask highlights only the survivors.
	draw_params = dict(matchColor=(0, 255, 0),
					   singlePointColor=(255, 0, 0),
					   matchesMask=matchesMask,
					   flags=0)
	MatchingResult = cv2.drawMatchesKnn(OriginImage, kp1, RegistrationImage, kp2, matches, None, **draw_params)

	MIN_MATCH_COUNT = 10
	if len(good_match) < MIN_MATCH_COUNT:
		print("Not enough matches are found - {}/{}".format(len(good_match), MIN_MATCH_COUNT))
		return None

	src_pts = np.float32([kp1[m.queryIdx].pt for m in good_match]).reshape(-1, 1, 2)
	dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_match]).reshape(-1, 1, 2)

	# RANSAC homography mapping image-1 points onto image-2 points,
	# with a 5-pixel reprojection threshold.
	TransformationMatrix, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
	if TransformationMatrix is None:
		# findHomography can fail (return None) even with enough input
		# points; guard it so warpPerspective does not raise.
		print("Not enough matches are found - {}/{}".format(len(good_match), MIN_MATCH_COUNT))
		return None
	# Backward mapping: WARP_INVERSE_MAP applies the inverse of the
	# img1->img2 homography, bringing image 2 into image 1's frame.
	RegistrationImage = cv2.warpPerspective(RegistrationImage,
	                                        np.array(TransformationMatrix),
	                                        (RegistrationImage.shape[1], RegistrationImage.shape[0]),
	                                        flags=cv2.WARP_INVERSE_MAP)

	# Overlay stitch: copy every non-black registered pixel over image 1.
	# (Very naive blending; seams will show.) Vectorized NumPy mask copy
	# replaces the original O(H*W) per-pixel Python loop.
	StitchingImage = OriginImage.copy()
	if RegistrationImage.ndim == 3:
		nonzero = RegistrationImage.any(axis=2)
	else:
		nonzero = RegistrationImage.astype(bool)
	StitchingImage[nonzero] = RegistrationImage[nonzero]

	return OriginImage, RegistrationImage, MatchingResult, StitchingImage, TransformationMatrix

# 5. Run the full SIFT pipeline and return the images to visualize.
def SiftProcess(dir1: str, dir2: str, dir3: str) -> Tuple[Any, Any, Any]:
	"""
	Load three images, match with SIFT, and register images 2 and 3
	onto image 1.

	:param dir1: path of the reference image
	:param dir2: path of the second image
	:param dir3: path of the third image
	:return: (reference image, registered image 2, registered image 3)
	:raises RuntimeError: when registration of either pair fails.
	"""
	src_img, warp_img1, warp_img2 = LoadImage(dir1, dir2, dir3)

	start = time.time()
	warp1_kp1, warp1_kp2, matches1 = SiftMatch(src_img, warp_img1)
	warp2_kp1, warp2_kp2, matches2 = SiftMatch(src_img, warp_img2)
	end = time.time()
	print('特征点计算以及匹配的时间：', end - start)

	start = time.time()
	result1 = GetGoodMatch(src_img, warp_img1, warp1_kp1, warp1_kp2, matches1)
	result2 = GetGoodMatch(src_img, warp_img2, warp2_kp1, warp2_kp2, matches2)
	end = time.time()
	print('去除误匹配点、计算变换矩阵并进行拼接的时间：', end - start)

	# GetGoodMatch returns None on failure; fail loudly here instead of
	# letting the tuple unpack crash with a cryptic TypeError.
	if result1 is None or result2 is None:
		raise RuntimeError("SIFT registration failed: not enough good matches")

	_, image2, _, _, _ = result1
	_, image3, _, _, _ = result2
	return src_img, image2, image3

def SurfProcess(dir1: str, dir2: str, dir3: str) -> Tuple[Any, Any, Any]:
	"""
	Load three images, match with SURF, and register images 2 and 3
	onto image 1.

	:param dir1: path of the reference image
	:param dir2: path of the second image
	:param dir3: path of the third image
	:return: (reference image, registered image 2, registered image 3)
	:raises RuntimeError: when registration of either pair fails.
	"""
	src_img, warp_img1, warp_img2 = LoadImage(dir1, dir2, dir3)

	start = time.time()
	warp1_kp1, warp1_kp2, matches1 = SurfMatch(src_img, warp_img1)
	warp2_kp1, warp2_kp2, matches2 = SurfMatch(src_img, warp_img2)
	end = time.time()
	print('特征点计算以及匹配的时间：', end - start)

	start = time.time()
	result1 = GetGoodMatch(src_img, warp_img1, warp1_kp1, warp1_kp2, matches1)
	result2 = GetGoodMatch(src_img, warp_img2, warp2_kp1, warp2_kp2, matches2)
	end = time.time()
	print('去除误匹配点、计算变换矩阵并进行拼接的时间：', end - start)

	# GetGoodMatch returns None on failure; fail loudly here instead of
	# letting the tuple unpack crash with a cryptic TypeError.
	if result1 is None or result2 is None:
		raise RuntimeError("SURF registration failed: not enough good matches")

	_, image2, _, _, _ = result1
	_, image3, _, _, _ = result2
	return src_img, image2, image3


if __name__ == '__main__':
	# Input paths. (Commented-out alternative data sets removed; point
	# these at any three overlapping views of the same scene.)
	imgDir1 = "./2_0.bmp"
	imgDir2 = "./2_60.bmp"
	imgDir3 = "./2_120.bmp"

	SiftImg1, SiftImg2, SiftImg3 = SiftProcess(imgDir1, imgDir2, imgDir3)
	SurfImg1, SurfImg2, SurfImg3 = SurfProcess(imgDir1, imgDir2, imgDir3)

	# Show source + both registered images side by side, one window per
	# feature detector. The three images must share height and dtype for
	# np.concatenate along the width axis.
	sift_list = [SiftImg1, SiftImg2, SiftImg3]
	SiftResult = np.concatenate(sift_list, axis=1)
	cv2.imshow("SiftResult", SiftResult)

	surf_list = [SurfImg1, SurfImg2, SurfImg3]
	SurfResult = np.concatenate(surf_list, axis=1)
	cv2.imshow("SurfResult", SurfResult)

	# Block until a key is pressed, then tear the windows down.
	cv2.waitKey(0)
	cv2.destroyAllWindows()
