{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "5382c7eb-ffb0-4d9b-b7ae-30265d5f1656",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Part 2: image stitching\n",
     "# 1. read the images and resize them\n",
     "# 2. detect keypoints, compute descriptors, and derive the homography matrix\n",
     "# 3. warp the image\n",
     "# 4. stitch the images together and write the output image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "78c2fc74-c643-4efd-a652-c688acca632d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b69c0540-534f-451b-a4c4-5a6fc62e6f27",
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2\n",
    "import ctypes\n",
    "\n",
    "def drawPicture(name, pic):\n",
    "    # 定义用于获取窗口大小的Windows API函数\n",
    "    user32 = ctypes.windll.user32\n",
    "    \n",
    "    # 设置可调整大小的窗口\n",
    "    cv2.namedWindow(name, cv2.WINDOW_NORMAL)\n",
    "    \n",
    "    while True:\n",
    "        # 设置窗口的宽度和高度\n",
    "        window_width = 1920\n",
    "        window_height = 1080\n",
    "    \n",
    "        # 根据窗口大小调整图片尺寸\n",
    "        resized_image = cv2.resize(pic, (window_width, window_height))\n",
    "    \n",
    "        # 重新显示调整后的图片\n",
    "        cv2.imshow(name, resized_image)\n",
    "    \n",
    "        if cv2.waitKey(0):\n",
    "            break\n",
    "    \n",
    "    cv2.destroyAllWindows()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "ec9ddbae-36e3-45ea-90c3-701b8b4c85dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_homo(img1, img2):\n",
    "    # 1创建特征转换对象\n",
    "    # 2通过特征转换对象获得特征点和描述子\n",
    "    # 3创建特征匹配器\n",
    "    # 4进行特征匹配\n",
    "    # 5过滤特征，找到有效的特征匹配点\n",
    "\n",
    "    sift = cv2.xfeatures2d.SIFT_create()\n",
    "\n",
    "    k1, d1 = sift.detectAndCompute(img1, None)\n",
    "    k2, d2 = sift.detectAndCompute(img2, None)\n",
    "\n",
    "    # 创建特征匹配器\n",
    "    bf = cv2.BFMatcher()\n",
    "    matches = bf.knnMatch(d1, d2, k=2)\n",
    "\n",
    "    verify_ratio = 0.8\n",
    "    verify_matches = []\n",
    "    \n",
    "    for m1, m2 in matches:\n",
    "        if m1.distance < 0.45*m2.distance:\n",
    "            verify_matches.append(m1)\n",
    "\n",
    "    min_matches = 8\n",
    "    if len(verify_matches) > min_matches: # 实际上匹配的特征点有4个以上就能构建单应性矩阵\n",
    "\n",
    "        img1_pts = []\n",
    "        img2_pts = []\n",
    "\n",
    "        for m in verify_matches:\n",
    "            img1_pts.append(k1[m.queryIdx].pt) # img1当中的点去查找query\n",
    "            img2_pts.append(k2[m.trainIdx].pt) # img2当中的点被查找train\n",
    "\n",
    "        img1_pts = np.float32(img1_pts).reshape(-1,1,2)\n",
    "        img2_pts = np.float32(img2_pts).reshape(-1,1,2)\n",
    "        \n",
    "        H, mask = cv2.findHomography(img1_pts, img2_pts, cv2.RANSAC, 0.5) # 随机抽样一致性\n",
    "        print('homography matrix:\\n{}'.format(H))\n",
    "        print('homography mask:\\n{}'.format(np.int32(mask).reshape(-1,len(img1_pts))))\n",
    "\n",
    "        return H\n",
    "\n",
    "    else:\n",
    "        print('err: Not enough match key points\\n')\n",
    "        exit()        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "9a2a9a00-8a8a-47b4-b614-c336decaa0c5",
   "metadata": {},
   "outputs": [],
   "source": [
     "def stitch_image(img1, img2, H):\n",
     "    \"\"\"Warp img1 with homography H onto a canvas large enough for both\n",
     "    images, paste img2 on top, then blend a narrow vertical seam.\n",
     "\n",
     "    Returns the stitched BGR image.\n",
     "    \"\"\"\n",
     "    # 1. get the four corner points of each image\n",
     "    # 2. transform the second image\n",
     "    # 3. create a large canvas and translate the image into it\n",
     "    # 4. stitch the two images together\n",
     "\n",
     "    h1, w1 = img1.shape[:2]\n",
     "    h2, w2 = img2.shape[:2]\n",
     "    \n",
     "    # the four corner points of each image\n",
     "    img1_dims = np.float32([[0,0], [0,h1], [w1,h1], [w1,0]]).reshape(-1,1,2)\n",
     "    img2_dims = np.float32([[0,0], [0,h2], [w2,h2], [w2,0]]).reshape(-1,1,2)\n",
     "\n",
     "    img2_transform = cv2.perspectiveTransform(img2_dims, H) # project img2's 4 corners through the homography\n",
     "    print(img2_transform)\n",
     "    # drawPicture('img2', img2)\n",
     "    \n",
     "    # corners of both images in one array -> bounding box of the panorama\n",
     "    result_dims = np.concatenate((img1_dims, img2_transform), axis=0)\n",
     "    # print(result_dims)   \n",
     "\n",
     "    [x_min, y_min] = np.int32(result_dims.min(axis=0).ravel()-0.5)\n",
     "    [x_max, y_max] = np.int32(result_dims.max(axis=0).ravel()+0.5)\n",
     "    # print(x_min, y_min)\n",
     "    # print(x_max, y_max)\n",
     "\n",
     "    # translation distance to shift everything into positive coordinates\n",
     "    transform_dist = [-x_min, -y_min]\n",
     "    result_img = cv2.warpPerspective(img1, H, (x_max-x_min, y_max-y_min)) # warp img1 without translation (overwritten below)\n",
     "    #drawPicture('homoImage1', result_img)\n",
     "    \n",
     "    #[1, 0, dx]\n",
     "    #[0, 1, dy]\n",
     "    #[0, 0, 1]\n",
     "    # img1 warped with the translation composed into the homography\n",
     "    transform_array = np.array([[1, 0, transform_dist[0]],\n",
     "                                [0, 1, transform_dist[1]],\n",
     "                                [0, 0, 1]])\n",
     "    result_img = cv2.warpPerspective(img1, transform_array.dot(H), (x_max-x_min, y_max-y_min))\n",
     "\n",
     "    '''\n",
     "    # 直方图规定化，尝试去除缝隙\n",
     "    hist = cv2.calcHist([img2], [0], None, [256], [0, 256])\n",
     "    cdf = hist.cumsum()\n",
     "    cdf_normalized = cdf / cdf[-1]\n",
     "    mapping = np.round(cdf_normalized * 255).astype(np.int32)\n",
     "    img2 = mapping[img2].astype(np.uint8)\n",
     "    result_img = mapping[result_img].astype(np.uint8)\n",
     "    '''\n",
     "\n",
     "    # paste the original img2 over the warped img1\n",
     "    result_img[transform_dist[1]:transform_dist[1]+h2,\n",
     "                transform_dist[0]:transform_dist[0]+w2] = img2\n",
     "    \n",
     "    # seam blending (simple linear weighted-fusion example)\n",
     "    # NOTE(review): `j - w1` below is negative when j < w1 and numpy wraps it\n",
     "    # to index from the row end; the blend also mixes with the pixel at\n",
     "    # `j - width_edge` rather than a warped-img1 pixel — confirm both are\n",
     "    # intended before relying on this seam treatment.\n",
     "    width_edge = 2\n",
     "    left_edge = transform_dist[0] - width_edge\n",
     "    right_edge = transform_dist[0] + width_edge\n",
     "    def weight_function(x):\n",
     "        # linear ramp from 0 at left_edge to 1 at right_edge, clamped\n",
     "        return max(0, min(1, (x - left_edge) / (right_edge - left_edge)))\n",
     "    for i in range(h1):\n",
     "        for j in range(left_edge, right_edge):\n",
     "            weight = weight_function(j)\n",
     "            if result_img[i, j, 1] != 0 and result_img[i, j - w1, 1] != 0: # check green channel (BGR; eye is most sensitive to green)\n",
     "                result_img[i, j, :] = weight * result_img[i, j, :] + (1 - weight) * result_img[i, j - width_edge, :]\n",
     "    \n",
     "    return result_img"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "4248b486-f4d6-4581-97f9-d62396ba4d78",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Step 1: read the images and resize them to a common size (1920x1080 below)\n",
     "# Step 2: find keypoints and descriptors, compute the homography matrix\n",
     "# Step 3: warp the image with the homography, then translate it\n",
     "# Step 4: stitch and write the final result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "42efc2cd-b776-4367-8ea6-26552ce078ce",
   "metadata": {},
   "outputs": [],
   "source": [
    "img1 = cv2.imread('targ1.png')\n",
    "img2 = cv2.imread('targ2.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "7f774f4d-5e5d-4f9f-b9bd-1b57cdf81c52",
   "metadata": {},
   "outputs": [],
   "source": [
    "img_width = 1920\n",
    "img_height = 1080\n",
    "\n",
    "img1 = cv2.resize(img1, (img_width, img_height))\n",
    "img2 = cv2.resize(img2, (img_width, img_height))\n",
    "\n",
    "#inputs = np.hstack((img1, img2))\n",
    "#drawPicture('input img', inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "5513b029-0dab-4c7a-8f17-89b28927aa9d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "homography matrix:\n",
      "[[ 1.73027831e+00  2.29049572e-01 -1.47858653e+03]\n",
      " [ 2.54247568e-02  1.45974761e+00 -4.76734680e+01]\n",
      " [ 3.43406334e-04  1.34294118e-05  1.00000000e+00]]\n",
      "homography mask:\n",
      "[[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "  0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 1 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 1\n",
      "  1 1 1 0 1 1 1 1 1 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0]]\n"
     ]
    }
   ],
   "source": [
     "# Estimate the homography relating the two resized images.\n",
     "H = get_homo(img1, img2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "9d3be714-02cf-4811-829c-cb2be9e67464",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[[-1.4785865e+03 -4.7673470e+01]]\n",
      "\n",
      " [[-1.2136111e+03  1.5069968e+03]]\n",
      "\n",
      " [[ 1.2491735e+03  9.4254279e+02]]\n",
      "\n",
      " [[ 1.1110126e+03  6.8826461e-01]]]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Stitch the pair, display the panorama, and save it to disk.\n",
     "result_img = stitch_image(img1, img2, H)\n",
     "drawPicture('result', result_img)\n",
     "\n",
     "cv2.imwrite('contact.png', result_img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "6c0b21e5-69e1-4535-a94e-4595510a92c8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 整理成一个函数\n",
    "def sift_stitch_image(img1_name, img2_name):\n",
    "    img1 = cv2.imread(img1_name)\n",
    "    img2 = cv2.imread(img2_name)\n",
    "\n",
    "    img_width = 1920\n",
    "    img_height = 1080\n",
    "    \n",
    "    img1 = cv2.resize(img1, (img_width, img_height))\n",
    "    img2 = cv2.resize(img2, (img_width, img_height))\n",
    "\n",
    "    H = get_homo(img1, img2)\n",
    "\n",
    "    result_img = stitch_image(img1, img2, H)\n",
    "    drawPicture('result', result_img)\n",
    "    \n",
    "    cv2.imwrite('contact.png', result_img)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "a2b68f4c-c2c1-4b44-838a-057988e658c6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "homography matrix:\n",
      "[[ 1.73027831e+00  2.29049572e-01 -1.47858653e+03]\n",
      " [ 2.54247568e-02  1.45974761e+00 -4.76734680e+01]\n",
      " [ 3.43406334e-04  1.34294118e-05  1.00000000e+00]]\n",
      "homography mask:\n",
      "[[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "  0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 1 1 1 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 1\n",
      "  1 1 1 0 1 1 1 1 1 1 0 0 1 0 0 0 1 0 0 0 0 0 0 0 0 0 0]]\n",
      "[[[-1.4785865e+03 -4.7673470e+01]]\n",
      "\n",
      " [[-1.2136111e+03  1.5069968e+03]]\n",
      "\n",
      " [[ 1.2491735e+03  9.4254279e+02]]\n",
      "\n",
      " [[ 1.1110126e+03  6.8826461e-01]]]\n",
      "homography matrix:\n",
      "[[ 4.02090038e+00  2.32399997e-01 -4.77802968e+03]\n",
      " [-4.23511885e-02  2.83511974e+00 -1.33135354e+00]\n",
      " [ 7.31422305e-04 -2.69403477e-04  1.00000000e+00]]\n",
      "homography mask:\n",
      "[[0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 1 1 0 1 1 1\n",
      "  1 0 0 1 1 1 1 1 1 0 0 1 0 0 0]]\n",
      "[[[-4.7780298e+03 -1.3313535e+00]]\n",
      "\n",
      " [[-6.3847041e+03  4.3165122e+03]]\n",
      "\n",
      " [[ 1.5108965e+03  1.4097279e+03]]\n",
      "\n",
      " [[ 1.2236665e+03 -3.4373653e+01]]]\n"
     ]
    }
   ],
   "source": [
     "# Stitch the first two images, then stitch that result with the third.\n",
     "sift_stitch_image('targ1.png', 'targ2.png')\n",
     "sift_stitch_image('contact.png', 'targ3.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "876a3682-f860-41e0-beef-8681c2aa863b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
