import cv2
import numpy as np
from matplotlib import pyplot as plt
import time
import math


def cylindricalWarpImage(img1, f, savefig=False):
    # w=img1.shape[1]
    # f = (w / 2) / math.atan(math.pi/ 8)
    # K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]]) # mock calibration matrix  模板校准矩阵
    cyl = np.zeros_like(img1)
    cyl_mask = np.zeros_like(img1)
    cyl_h,cyl_w,ch = cyl.shape
    x_c = float(cyl_w) / 2.0
    y_c = float(cyl_h) / 2.0
    for x_cyl in np.arange(0,cyl_w):
        for y_cyl in np.arange(0,cyl_h):
            x1= f*math.atan((x_cyl-x_c)/f)+f*math.atan(x_c/f)
            y1= f*(y_cyl-y_c)/math.sqrt((x_cyl-x_c)*(x_cyl-x_c)+f*f)+y_c

            x_im=(int)(x1+0.5) #加0.5是为了四舍五入
            y_im=(int)(y1+0.5)
            if(x_im<cyl_w and y_im<cyl_h):
                cyl[int(y_im),int(x_im)][0]= img1[int(y_cyl),int(x_cyl)][0]
                cyl[int(y_im), int(x_im)][1] = img1[int(y_cyl), int(x_cyl)][1]
                cyl[int(y_im), int(x_im)][2] = img1[int(y_cyl), int(x_cyl)][2]
                cyl_mask[int(y_cyl),int(x_cyl)] = 255

    if savefig:
        plt.imshow(cyl, cmap='gray')
        plt.savefig("cyl.png",bbox_inches='tight')

    return cyl


def stitch2img(img1, img2):
    """Stitch ``img2`` onto the right side of ``img1``.

    Pipeline: both images are warped onto a common cylinder, SURF
    keypoints are matched with a FLANN kd-tree + Lowe's ratio test, a
    homography is estimated with RANSAC, ``img2`` is warped into
    ``img1``'s frame, and the overlap band is linearly blended.

    Args:
        img1: left image (BGR numpy array).
        img2: right image (BGR numpy array).

    Returns:
        The stitched panorama as a numpy array (wider than either input).

    Raises:
        ValueError: when fewer good matches than the threshold are found.
    """
    MIN = 10

    # Per-image focal length estimated from width.
    # NOTE(review): the usual pinhole relation is f = (w/2) / tan(fov/2);
    # atan here looks like a typo but is kept unchanged — confirm before
    # switching, as it only rescales the cylindrical warp.
    w1 = img1.shape[1]
    w2 = img2.shape[1]                        # bug fix: was read from img1
    f1 = (w1 / 2) / math.atan(math.pi / 8)
    f2 = (w2 / 2) / math.atan(math.pi / 8)    # bug fix: was computed from w1
    f = int((f1 + f2) / 2)
    img1 = cylindricalWarpImage(img1, f)
    img2 = cylindricalWarpImage(img2, f)

    # SURF features (needs opencv-contrib; SIFT works as a drop-in too).
    surf = cv2.xfeatures2d.SURF_create(400)
    kp1, descrip1 = surf.detectAndCompute(img1, None)
    kp2, descrip2 = surf.detectAndCompute(img2, None)

    # FLANN kd-tree matcher; keep matches passing Lowe's 0.75 ratio test.
    FLANN_INDEX_KDTREE = 0
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv2.FlannBasedMatcher(indexParams, searchParams)
    match = flann.knnMatch(descrip1, descrip2, k=2)
    good = [m for m, n in match if m.distance < 0.75 * n.distance]

    if len(good) <= MIN:
        # bug fix: the original printed a message and then hit a NameError
        # on `return warpImg`; fail explicitly instead.
        raise ValueError("not enough matches!")

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    ano_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # Homography img1 -> img2 via RANSAC; inverted below to pull img2
    # into img1's frame.
    M, mask = cv2.findHomography(src_pts, ano_pts, cv2.RANSAC, 7.0)

    # Canvas wide enough for both images; img2 warped into img1's frame.
    warpImg = cv2.warpPerspective(
        img2, np.linalg.inv(M),
        (img1.shape[1] + img2.shape[1], img2.shape[0]))

    rows, cols = img1.shape[:2]

    # Overlap band: leftmost/rightmost columns where both images have
    # content.  Defaults cover the no-break case (was an unbound-local
    # bug in the original).
    left, right = 0, cols - 1
    for col in range(0, cols):
        if img1[:, col].any() and warpImg[:, col].any():
            left = col
            break
    for col in range(cols - 1, 0, -1):
        if img1[:, col].any() and warpImg[:, col].any():
            right = col
            break

    # Vectorized blend over the img1-sized region (replaces the original
    # per-pixel double loop):
    #   - only img2 has content  -> take img2
    #   - only img1 has content  -> take img1
    #   - both                    -> alpha-blend, weight shifting toward
    #     img2 as the column approaches the right edge of the overlap.
    img1_region = img1[:rows, :cols]
    warp_region = warpImg[:rows, :cols]

    col_idx = np.arange(cols, dtype=np.float64)
    src_len = np.abs(col_idx - left)
    dst_len = np.abs(col_idx - right)
    denom = src_len + dst_len
    denom[denom == 0] = 1.0   # guard the degenerate single-column overlap
    alpha = (src_len / denom)[np.newaxis, :, np.newaxis]

    blended = np.clip(
        img1_region * (1.0 - alpha) + warp_region * alpha,
        0, 255).astype(np.uint8)

    has1 = img1_region.any(axis=2)[..., np.newaxis]
    has2 = warp_region.any(axis=2)[..., np.newaxis]
    res = np.where(~has1, warp_region, np.where(~has2, img1_region, blended))

    warpImg[0:rows, 0:cols] = res
    return warpImg

# img1 = cv2.imread("books_1.png")
# img2 = cv2.imread("books_2.png")
# img3 = cv2.imread("books_3.png")
# img12=stitch2img(img1,img2)
# img123=stitch2img(img12,img3)
#
# cv2.imshow("img12",img12)
# cv2.imwrite("img12_3.png",img12)
# cv2.imshow("img123",img123)
# cv2.imwrite("img123_3.png",img123)
# cv2.waitKey()


def _load_image(path):
    """Read an image from disk, failing loudly instead of letting a
    silent ``None`` from cv2.imread crash deep inside the stitcher."""
    img = cv2.imread(path)
    if img is None:
        raise FileNotFoundError("could not read image: " + path)
    return img


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer opens windows
    # or writes files as a side effect.
    img1 = _load_image("img01.jpg")
    img2 = _load_image("img02.jpg")
    img3 = _load_image("img03.jpg")

    # Stitch left-to-right, then extend the panorama with the third image.
    img12 = stitch2img(img1, img2)
    img123 = stitch2img(img12, img3)

    cv2.imshow("img12", img12)
    cv2.imwrite("img12_4.jpg", img12)
    cv2.imshow("img123", img123)
    cv2.imwrite("img123_4.jpg", img123)
    cv2.waitKey()