import cv2
import numpy as np


# 平移
# Translation
def translation(image, x, y):
    """Shift *image* by (x, y) pixels on an unchanged canvas.

    Positive *x* moves content right, positive *y* moves it down;
    regions shifted in from outside are filled with the warpAffine
    default border value.
    """
    height, width = image.shape[0], image.shape[1]
    shift_matrix = np.float32([[1, 0, x],
                               [0, 1, y]])
    return cv2.warpAffine(image, shift_matrix, (width, height))


# 旋转
# Rotation
def rotation(image, angle, scale=1.0):
    """Rotate *image* by *angle* degrees about its center, scaling by *scale*.

    Fix: the original `rows, cols = image.shape` raised ValueError for
    color (H, W, C) images; `shape[:2]` handles both grayscale and color,
    matching how the sibling functions in this file index the shape.
    The output canvas keeps the input width and height.
    """
    rows, cols = image.shape[:2]  # robust for 2-D and 3-D arrays
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, scale)
    return cv2.warpAffine(image, M, (cols, rows))


# 缩放 - 近邻插值
# Scaling - nearest-neighbor interpolation
def nearest_neighbor_scaling(image, scale_x, scale_y):
    """Resize *image* by factors (scale_x, scale_y) with nearest-neighbor sampling."""
    return cv2.resize(
        image,
        dsize=None,
        fx=scale_x,
        fy=scale_y,
        interpolation=cv2.INTER_NEAREST,
    )


# 缩放 - 双线性插值
# Scaling - bilinear interpolation
def bilinear_scaling(image, scale_x, scale_y):
    """Resize *image* by factors (scale_x, scale_y) with bilinear interpolation."""
    return cv2.resize(
        image,
        dsize=None,
        fx=scale_x,
        fy=scale_y,
        interpolation=cv2.INTER_LINEAR,
    )


# 镜像
# Mirror / flip
def mirror(image, axis=1):
    """Flip *image* using cv2.flip flip-code semantics.

    axis=1 mirrors horizontally (the default), axis=0 vertically,
    and a negative axis flips around both axes.
    """
    flipped = cv2.flip(image, axis)
    return flipped


# 错切
# Shear
def shear(image, shear_factor_x, shear_factor_y):
    """Apply an affine shear to *image* on an unchanged canvas.

    *shear_factor_x* skews columns as a function of the row index and
    *shear_factor_y* skews rows as a function of the column index.

    Fix: the original `rows, cols = image.shape` raised ValueError for
    color (H, W, C) images; `shape[:2]` handles both grayscale and color.
    """
    rows, cols = image.shape[:2]  # robust for 2-D and 3-D arrays
    M = np.float32([[1, shear_factor_x, 0],
                    [shear_factor_y, 1, 0]])
    return cv2.warpAffine(image, M, (cols, rows))


# 透视变换
# Perspective transform
def perspective_transformation(image, src_points, dst_points):
    """Warp *image* with the perspective transform mapping src_points to dst_points.

    Both point sets are passed straight to cv2.getPerspectiveTransform
    (four float32 point pairs); the output keeps the input canvas size.
    """
    height, width = image.shape[0], image.shape[1]
    homography = cv2.getPerspectiveTransform(src_points, dst_points)
    return cv2.warpPerspective(image, homography, (width, height))


# 仿射变换
# Affine transform
def affine_transformation(image, src_points, dst_points):
    """Warp *image* with the affine transform mapping src_points to dst_points.

    Both point sets are passed straight to cv2.getAffineTransform
    (three float32 point pairs); the output keeps the input canvas size.
    """
    height, width = image.shape[0], image.shape[1]
    affine_matrix = cv2.getAffineTransform(src_points, dst_points)
    return cv2.warpAffine(image, affine_matrix, (width, height))


# 校正几何畸变
# Geometric distortion correction
def geometric_distortion_correction(image, src_points, dst_points, degree=2):
    """Correct geometric distortion with a projective (homography) model.

    NOTE(review): *degree* is accepted but never used — only a
    first-order projective correction is performed. Confirm whether a
    polynomial correction of the given degree was intended; the
    parameter is kept for interface compatibility.
    """
    height, width = image.shape[0], image.shape[1]
    correction = cv2.getPerspectiveTransform(src_points, dst_points)
    return cv2.warpPerspective(image, correction, (width, height))


# 图像配准 - SIFT
# Image registration - SIFT
def sift_image_registration(image1, image2):
    """Align *image1* to *image2* via SIFT features and a RANSAC homography.

    Returns *image1* warped into *image2*'s coordinate frame, on a canvas
    the size of *image1* (matching the original behavior).

    Raises:
        ValueError: if descriptors, good matches, or the homography
            cannot be obtained.

    Fixes over the original:
    - `h, w = image1.shape` raised ValueError for color images; use `shape[:2]`.
    - `for m, n in matches` crashed when knnMatch returned fewer than
      2 neighbors for a query; filter pairs by length instead.
    - guard against `des1/des2 is None`, fewer than 4 good matches, and
      a failed (None) homography, which previously produced opaque
      crashes inside cv2.
    """
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(image1, None)
    kp2, des2 = sift.detectAndCompute(image2, None)
    if des1 is None or des2 is None:
        raise ValueError("no SIFT descriptors found in one of the images")

    # Lowe's ratio test on 2-NN matches; knnMatch may return <2 neighbors.
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = [pair[0] for pair in matches
            if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance]
    if len(good) < 4:  # findHomography needs at least 4 correspondences
        raise ValueError("not enough good matches to estimate a homography")

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, _mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if M is None:
        raise ValueError("homography estimation failed")

    h, w = image1.shape[:2]  # works for grayscale and color inputs
    return cv2.warpPerspective(image1, M, (w, h))


# 图像配准 - ORB
# Image registration - ORB
def orb_image_registration(image1, image2):
    """Align *image1* to *image2* via ORB features and a RANSAC homography.

    Returns *image1* warped into *image2*'s coordinate frame, on a canvas
    the size of *image1* (matching the original behavior).

    Raises:
        ValueError: if descriptors, matches, or the homography cannot
            be obtained.

    Fixes over the original:
    - `h, w = image1.shape` raised ValueError for color images; use `shape[:2]`.
    - guard against `des1/des2 is None` (no keypoints detected), fewer
      than 4 matches, and a failed (None) homography, which previously
      produced opaque crashes inside cv2.
    """
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(image1, None)
    kp2, des2 = orb.detectAndCompute(image2, None)
    if des1 is None or des2 is None:
        raise ValueError("no ORB descriptors found in one of the images")

    # Hamming distance + cross-check for binary ORB descriptors.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
    if len(matches) < 4:  # findHomography needs at least 4 correspondences
        raise ValueError("not enough matches to estimate a homography")

    src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    M, _mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if M is None:
        raise ValueError("homography estimation failed")

    h, w = image1.shape[:2]  # works for grayscale and color inputs
    return cv2.warpPerspective(image1, M, (w, h))