import math
import time

import cv2
import numpy as np
from matplotlib import pyplot as plt


'''
Warp an image from cartesian coordinates (x, y) into cylindrical coordinates (theta, h).
Returns: (image, mask)
The mask is in [0, 255] and holds 255 wherever the cylindrical image has a valid value.
Masks are useful for stitching.

Usage example:

    im = cv2.imread("myimage.jpg", 0)  # grayscale
    h, w = im.shape
    f = 700
    K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]])  # mock calibration matrix
    imcyl = cylindricalWarpImage(im, K)
'''
def cylindricalWarpImage(img1, K, savefig=False):
    """Warp a grayscale image from cartesian (x, y) into cylindrical (theta, h) coords.

    Args:
        img1: 2-D (grayscale) numpy array to warp.
        K: 3x3 camera intrinsics matrix; only the focal length K[0, 0] is used.
        savefig: if True, additionally save the warped image to "cyl.png".

    Returns:
        (cyl, cyl_mask): warped image and a mask of the same shape that is 255
        wherever the cylindrical image received a valid pixel (useful for stitching).
    """
    f = K[0, 0]
    cyl = np.zeros_like(img1)
    cyl_mask = np.zeros_like(img1)
    cyl_h, cyl_w = cyl.shape
    x_c = float(cyl_w) / 2.0
    y_c = float(cyl_h) / 2.0
    # Forward-map every source pixel (x_cyl, y_cyl) to its cylindrical position.
    for x_cyl in range(cyl_w):
        for y_cyl in range(cyl_h):
            # theta-coordinate: f*atan((x - x_c)/f), shifted so it stays non-negative.
            x1 = f * math.atan((x_cyl - x_c) / f) + f * math.atan(x_c / f)
            # h-coordinate: y scaled by f / sqrt((x - x_c)^2 + f^2).
            y1 = f * (y_cyl - y_c) / math.sqrt((x_cyl - x_c) ** 2 + f * f) + y_c

            x_im = int(x1 + 0.5)  # +0.5 rounds to the nearest integer
            y_im = int(y1 + 0.5)
            # Fix: also reject negative indices (numpy would silently wrap them),
            # and mark the mask at the *destination* pixel, matching the documented
            # contract that the mask flags valid cylindrical pixels.
            if 0 <= x_im < cyl_w and 0 <= y_im < cyl_h:
                cyl[y_im, x_im] = img1[y_cyl, x_cyl]
                cyl_mask[y_im, x_im] = 255

    if savefig:
        plt.imshow(cyl, cmap='gray')
        plt.savefig("cyl.png", bbox_inches='tight')

    return (cyl, cyl_mask)



MIN = 10  # minimum number of good matches required to attempt stitching
starttime = time.time()
img1 = cv2.imread('books_1.png')  # query image (left side of the panorama)
img2 = cv2.imread('books_2.png')  # train image (right side of the panorama)
# cv2.imread returns None on failure; fail early with a clear message instead
# of an opaque cv2.error later in the pipeline.
if img1 is None or img2 is None:
    raise FileNotFoundError("could not read 'books_1.png' and/or 'books_2.png'")

# SURF keypoints + descriptors (Hessian threshold 400); SIFT also works here.
surf = cv2.xfeatures2d.SURF_create(400)
kp1, descrip1 = surf.detectAndCompute(img1, None)
kp2, descrip2 = surf.detectAndCompute(img2, None)

# FLANN kd-tree matcher; Lowe's ratio test (0.75) keeps distinctive matches.
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv2.FlannBasedMatcher(indexParams, searchParams)
match = flann.knnMatch(descrip1, descrip2, k=2)

good = [m for m, n in match if m.distance < 0.75 * n.distance]

if len(good) > MIN:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    ano_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # Homography mapping img1 -> img2 (RANSAC, 7 px reprojection threshold).
    M, mask = cv2.findHomography(src_pts, ano_pts, cv2.RANSAC, 7.0)
    # Warp img2 into img1's frame (inverse homography) on a canvas wide enough
    # to hold both images side by side; img1 will occupy the left part.
    warpImg = cv2.warpPerspective(img2, np.linalg.inv(M),
                                  (img1.shape[1] + img2.shape[1], img2.shape[0]))
    direct = warpImg.copy()
    # Naive paste of img1 over the warped canvas (no blending), for comparison.
    direct[0:img1.shape[0], 0:img1.shape[1]] = img1

    cv2.imshow("Result", warpImg)
    cv2.waitKey(0)

    # --- Blend the overlap between img1 and the warped img2 ---
    rows, cols = img1.shape[:2]

    # Leftmost / rightmost columns where both images have content.
    # Fix: default to the full width so the code cannot hit a NameError
    # if no overlapping column is found.
    left, right = 0, cols - 1
    for col in range(cols):
        if img1[:, col].any() and warpImg[:, col].any():
            left = col
            break
    for col in range(cols - 1, 0, -1):
        if img1[:, col].any() and warpImg[:, col].any():
            right = col
            break

    # Blended result for the img1-sized region of the canvas.
    res = np.zeros([rows, cols, 3], np.uint8)
    for row in range(rows):
        for col in range(cols):
            if not img1[row, col].any():        # only warped img2 has content here
                res[row, col] = warpImg[row, col]
            elif not warpImg[row, col].any():   # only img1 has content here
                res[row, col] = img1[row, col]
            else:                               # both have content: alpha-blend
                srcImgLen = float(abs(col - left))
                testImgLen = float(abs(col - right))
                denom = srcImgLen + testImgLen
                # Fix: guard the degenerate single-column overlap (left == right),
                # which would otherwise divide by zero.
                alpha = srcImgLen / denom if denom else 0.5
                # img1's weight grows toward the left edge of the overlap,
                # img2's toward the right, giving a smooth seam.
                res[row, col] = np.clip(
                    img1[row, col] * (1 - alpha) + warpImg[row, col] * alpha, 0, 255)

    warpImg[0:img1.shape[0], 0:img1.shape[1]] = res

    # seam_finder = cv2.detail_GraphCutSeamFinder("COST_COLOR_GRAD")
    # seam_finder.find(images_warped_f, corners, masks_warped)

    cv2.imshow("bestpanorma.jpg", warpImg)
    cv2.imwrite("bestpanorma.jpg", warpImg)
    cv2.waitKey(0)
else:
    print("not enough matches!")