{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from modules import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "ename": "FileNotFoundError",
     "evalue": "[WinError 3] 系统找不到指定的路径。: 'C:\\\\Users\\\\鲁睿\\\\Desktop\\\\DIP\\\\大作业\\\\甲骨文图片数据\\\\2_Train'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[2], line 6\u001b[0m\n\u001b[0;32m      4\u001b[0m img_val_folder \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(folder_before_pre,\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mval\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m      5\u001b[0m img_test_folder \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(folder_before_pre,\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtest\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m----> 6\u001b[0m anns \u001b[38;5;241m=\u001b[39m [file \u001b[38;5;28;01mfor\u001b[39;00m file \u001b[38;5;129;01min\u001b[39;00m \u001b[43mos\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlistdir\u001b[49m\u001b[43m(\u001b[49m\u001b[43msource_folder\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mif\u001b[39;00m file\u001b[38;5;241m.\u001b[39mendswith(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.json\u001b[39m\u001b[38;5;124m'\u001b[39m)]\n\u001b[0;32m      7\u001b[0m files \u001b[38;5;241m=\u001b[39m [file \u001b[38;5;28;01mfor\u001b[39;00m file \u001b[38;5;129;01min\u001b[39;00m os\u001b[38;5;241m.\u001b[39mlistdir(source_folder) \u001b[38;5;28;01mif\u001b[39;00m file\u001b[38;5;241m.\u001b[39mendswith(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.jpg\u001b[39m\u001b[38;5;124m'\u001b[39m) \u001b[38;5;129;01mand\u001b[39;00m file\u001b[38;5;241m.\u001b[39mreplace(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.jpg\u001b[39m\u001b[38;5;124m'\u001b[39m,\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m.json\u001b[39m\u001b[38;5;124m'\u001b[39m) \u001b[38;5;129;01min\u001b[39;00m anns]\n\u001b[0;32m      9\u001b[0m \u001b[38;5;28mprint\u001b[39m(files)\n",
      "\u001b[1;31mFileNotFoundError\u001b[0m: [WinError 3] 系统找不到指定的路径。: 'C:\\\\Users\\\\鲁睿\\\\Desktop\\\\DIP\\\\大作业\\\\甲骨文图片数据\\\\2_Train'"
     ]
    }
   ],
   "source": [
    "# Source image/annotation folder and preprocessing output root (paths blanked)\n",
    "source_folder = \"\"\n",
    "folder_before_pre = \"\"\n",
    "img_train_folder = os.path.join(folder_before_pre,'train')\n",
    "img_val_folder = os.path.join(folder_before_pre,'val')\n",
    "img_test_folder = os.path.join(folder_before_pre,'test')\n",
    "# Keep only the jpgs that have a matching json annotation file\n",
    "anns = [file for file in os.listdir(source_folder) if file.endswith('.json')]\n",
    "files = [file for file in os.listdir(source_folder) if file.endswith('.jpg') and file.replace('.jpg','.json') in anns]\n",
    "\n",
    "print(files)\n",
    "# NOTE(review): files holds '.jpg' names and anns holds '.json' names, so this\n",
    "# comparison can only print True when both lists are empty.\n",
    "print(files == anns)\n",
    "# file = 'b02519Z.jpg'\n",
    "# label = file.replace('.jpg','.json')\n",
    "# print(label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Already split; this code is not modified any more\n",
    "# NOTE(review): label_train_folder / label_val_folder / label_test_folder are\n",
    "# assigned only in a LATER cell, so this cell fails on a fresh kernel unless\n",
    "# that cell is executed first — TODO move those path definitions above this cell.\n",
    "files = files[:500]\n",
    "# Split the data: 70% train, 20% validation, 10% test\n",
    "a = 350\n",
    "b = 100\n",
    "c = 50\n",
    "# Copy each image (and its json label) into the existing split folders\n",
    "for file in files[:a]:\n",
    "    label = file.replace('.jpg','.json')\n",
    "    label_path1 = os.path.join(source_folder,label)\n",
    "    label_path2 = os.path.join(label_train_folder,label)\n",
    "    file_path1 = os.path.join(source_folder,file)\n",
    "    file_path2 = os.path.join(img_train_folder,file)\n",
    "    shutil.copy2(file_path1,file_path2)\n",
    "    shutil.copy2(label_path1,label_path2)\n",
    "for file in files[a:a+b]:\n",
    "    label = file.replace('.jpg','.json')\n",
    "    label_path1 = os.path.join(source_folder,label)\n",
    "    label_path2 = os.path.join(label_val_folder,label)\n",
    "    file_path1 = os.path.join(source_folder,file)\n",
    "    file_path2 = os.path.join(img_val_folder,file)\n",
    "    shutil.copy2(file_path1,file_path2)\n",
    "    shutil.copy2(label_path1,label_path2)\n",
    "for file in files[a+b:]:\n",
    "    label = file.replace('.jpg','.json')\n",
    "    label_path1 = os.path.join(source_folder,label)\n",
    "    label_path2 = os.path.join(label_test_folder,label)\n",
    "    file_path1 = os.path.join(source_folder,file)\n",
    "    file_path2 = os.path.join(img_test_folder,file)\n",
    "    shutil.copy2(file_path1,file_path2)\n",
    "    shutil.copy2(label_path1,label_path2)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Image preprocessing (based on related papers)\n",
    "# Helper functions\n",
    "# Gaussian filtering to smooth the image and suppress noise:\n",
    "def Gauss(img):\n",
    "    \"\"\"Return img smoothed with a 3x3 Gaussian kernel (sigma derived from size).\"\"\"\n",
    "    # The previous dead `blur = []` assignment was immediately overwritten; dropped.\n",
    "    return cv2.GaussianBlur(img, (3, 3), 0)\n",
    "    \n",
    "# Histogram equalization for image enhancement\n",
    "# NOTE(review): this definition is shadowed by a second `hist` defined further\n",
    "# down in this cell; only the later definition is effective at call time.\n",
    "def hist(img):\n",
    "    \n",
    "    hist,bins = np.histogram(img,256,[0,256])\n",
    "    cumul = np.cumsum(hist) # cumulative histogram (pixel count up to each gray level)\n",
    "    # Equalization\n",
    "          # normalize the cumulative histogram to [0, 255]\n",
    "    cumul_normalized = (cumul - cumul.min()) * 255 / (cumul.max() - cumul.min())\n",
    "          # map pixel values through the normalized CDF\n",
    "    img1 = np.interp(img,bins[:-1],cumul_normalized).astype(np.uint8)\n",
    "    return img1\n",
    "\n",
    "def cvt(img):\n",
    "    \"\"\"Binarize: pixels brighter than 127 become 255, all others 0.\n",
    "\n",
    "    BUG FIX: the previous loop compared whole rows to the threshold\n",
    "    (ambiguous truth value for 2-D arrays), only rebound the loop\n",
    "    variable, and returned nothing. Vectorized and returns the result.\n",
    "    \"\"\"\n",
    "    t = 127\n",
    "    return np.where(img > t, 255, 0).astype(np.uint8)\n",
    "    \n",
    "# The following three functions reproduce the preprocessing of paper 3\n",
    "# Adaptive linear gray-level stretch\n",
    "def linear(img):\n",
    "    \"\"\"Linearly stretch gray levels so min(img) -> 0 and max(img) -> 255.\"\"\"\n",
    "    r1, s1 = np.min(img), 0\n",
    "    r2, s2 = np.max(img), 255\n",
    "    if r2 == r1:\n",
    "        # Flat image: avoid division by zero; return it unchanged as uint8.\n",
    "        return np.asarray(img, dtype=np.uint8)\n",
    "    # Slope and intercept of the mapping s = slope * r + intercept\n",
    "    slope = (s2 - s1) / (r2 - r1)\n",
    "    intercept = s1 - slope * r1\n",
    "    # BUG FIX: apply the forward mapping slope*img + intercept; the previous\n",
    "    # code computed the inverse mapping (img - intercept) / slope, so the\n",
    "    # output never reached the full [0, 255] range. Debug prints removed.\n",
    "    transformed_image = np.clip(slope * img + intercept, 0, 255).astype(np.uint8)\n",
    "    return transformed_image\n",
    "\n",
    "# Histogram equalization (effective definition; shadows the earlier `hist`):\n",
    "def hist(image):\n",
    "    # Compute the image histogram\n",
    "    hist, bins = np.histogram(image, 256, [0, 256])\n",
    "    cdf = hist.cumsum()\n",
    "    cdf_normalized = (cdf - cdf.min()) * 255 / (cdf.max() - cdf.min())\n",
    "\n",
    "    # Map pixel values through the normalized CDF to equalize\n",
    "    equalized_image = np.interp(image, bins[:-1], cdf_normalized).astype(np.uint8)\n",
    "\n",
    "    return equalized_image\n",
    "\n",
    "# Otsu binarization\n",
    "# NOTE(review): redefined later in this cell with an extra BGR->gray\n",
    "# conversion; the later definition is the one actually in effect.\n",
    "def Otsu(img):\n",
    "    _, binary_image = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
    "    return binary_image\n",
    "\n",
    "# Morphological cleanup of the binarized image\n",
    "def xingtai(img,rho_max,rho_min):\n",
    "    \"\"\"Remove noise / streak / non-text connected components from a binary image.\n",
    "\n",
    "    Components whose area deviates more than one std from the mean area are\n",
    "    filled with background; remaining components whose bounding-box aspect\n",
    "    ratio (height/width) is outside [rho_min, rho_max] are removed as well.\n",
    "    \"\"\"\n",
    "    # Label connected regions (8-connectivity)\n",
    "    labels = measure.label(img, connectivity=2)\n",
    "    regions = measure.regionprops(labels)\n",
    "    if not regions:\n",
    "        # Nothing to clean; also avoids mean/std over an empty list.\n",
    "        return img\n",
    "    Aavg = np.mean([region.area for region in regions])\n",
    "    As = np.std([region.area for region in regions])\n",
    "\n",
    "    # Remove interference components by area\n",
    "    for region in regions:\n",
    "        Ai = region.area\n",
    "        if Ai < Aavg - As:\n",
    "            # dot-like noise region: fill with background\n",
    "            img[labels == region.label] = 0\n",
    "        elif Ai > Aavg + As:\n",
    "            # large streak/patch region: fill with background\n",
    "            img[labels == region.label] = 0\n",
    "\n",
    "    # Re-label the cleaned image\n",
    "    labels_cleaned = measure.label(img, connectivity=2)\n",
    "\n",
    "    # Keep only text-like regions by bounding-box aspect ratio\n",
    "    for region in measure.regionprops(labels_cleaned):\n",
    "        # BUG FIX: skimage bbox is (min_row, min_col, max_row, max_col); the\n",
    "        # previous unpacking reversed the corners, so the background fill\n",
    "        # below sliced img[max:min] — an empty slice that never removed\n",
    "        # anything.\n",
    "        min_row, min_col, max_row, max_col = region.bbox\n",
    "        height = max_row - min_row\n",
    "        width = max_col - min_col\n",
    "        if height == 0 or width == 0:\n",
    "            continue\n",
    "        aspect_ratio = height / width\n",
    "        if aspect_ratio > rho_max or aspect_ratio < rho_min:\n",
    "            # non-text region: fill with background\n",
    "            img[min_row:max_row, min_col:max_col] = 0\n",
    "    return img\n",
    "\n",
    "# Edge-gradient-covariance guided oracle-bone character edge repair\n",
    "# (reproduction unsuccessful; skipped for now)\n",
    "def gradient_magnitude(image):\n",
    "    # Horizontal and vertical Sobel gradients of the image\n",
    "    Ix = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=3)\n",
    "    Iy = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=3)\n",
    "    return np.sqrt(Ix**2 + Iy**2)\n",
    "\n",
    "def covariance_matrix(Ix, Iy):\n",
    "    # 2x2 covariance matrix of the two gradient fields\n",
    "    cov = np.array([[np.mean(Ix*Ix), np.mean(Ix*Iy)], [np.mean(Ix*Iy), np.mean(Iy*Iy)]])\n",
    "    return cov\n",
    "\n",
    "def edge_repair(image, window_size=3):\n",
    "    # Edge map via the Canny detector\n",
    "    edges = cv2.Canny(image, 50, 150)\n",
    "    \n",
    "    # Gradient magnitude of the edge image\n",
    "    grad_mag = gradient_magnitude(edges)\n",
    "    \n",
    "    # Gaussian smoothing of the gradient magnitude\n",
    "    grad_mag_blurred = cv2.GaussianBlur(grad_mag, (window_size, window_size), 0)\n",
    "    \n",
    "    # Covariance matrix of the smoothed gradients\n",
    "    Ix, Iy = np.gradient(grad_mag_blurred)\n",
    "    cov_matrix = covariance_matrix(Ix, Iy)\n",
    "    \n",
    "    # Eigen-decomposition of the covariance matrix\n",
    "    eig_val, eig_vec = np.linalg.eig(cov_matrix)\n",
    "    \n",
    "    # Sort eigenvectors by descending eigenvalue\n",
    "    eig_vec_sorted = eig_vec[:, eig_val.argsort()[::-1]]\n",
    "    \n",
    "    # Dominant edge direction from the principal eigenvector\n",
    "    theta = np.arctan2(eig_vec_sorted[1, 0], eig_vec_sorted[0, 0])\n",
    "    \n",
    "    # Rotation matrix used as an edge template\n",
    "    edge_template = np.array([[np.cos(theta), np.sin(theta)], \n",
    "                              [-np.sin(theta), np.cos(theta)]])\n",
    "    \n",
    "    # Convolve the edge image with the template to repair edges\n",
    "    repaired_edges = cv2.filter2D(edges, -1, edge_template)\n",
    "    \n",
    "    # Smooth-region repair (inpaint over the dilated edge mask)\n",
    "    smooth_region = cv2.dilate(repaired_edges, np.ones((5,5), np.uint8), iterations=1)\n",
    "    smooth_repaired = cv2.inpaint(image, smooth_region, 3, cv2.INPAINT_TELEA)\n",
    "    \n",
    "    # Corner-region repair (inpaint over the eroded edge mask)\n",
    "    corner_region = cv2.erode(repaired_edges, np.ones((3,3), np.uint8), iterations=1)\n",
    "    corner_repaired = cv2.inpaint(image, corner_region, 3, cv2.INPAINT_TELEA)\n",
    "    \n",
    "    # Merge both repaired results back into the original image\n",
    "    final_repaired = np.copy(image)\n",
    "    final_repaired[smooth_region > 0] = smooth_repaired[smooth_region > 0]\n",
    "    final_repaired[corner_region > 0] = corner_repaired[corner_region > 0]\n",
    "    return final_repaired\n",
    "def Otsu(img):\n",
    "    # Effective Otsu definition (shadows the earlier one): grayscale first\n",
    "    img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n",
    "    _, binary_image = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n",
    "    return binary_image\n",
    "\n",
    "def preprocess(img):\n",
    "    # Full pipeline: resize -> linear gray stretch -> Otsu -> morphology.\n",
    "    # NOTE(review): img arrives as a PIL Image (resize() is PIL's), but\n",
    "    # linear()/Otsu() treat it as an array — cv2.cvtColor inside Otsu needs\n",
    "    # a numpy array. Presumably linear()'s numpy ops convert it implicitly;\n",
    "    # TODO confirm.\n",
    "    img = img.resize((256,256))\n",
    "    # img = Gauss(img)\n",
    "    img = linear(img)\n",
    "    img = Otsu(img)\n",
    "    img = xingtai(img,10,0.1)\n",
    "    # img = edge_repair(img)  # edge repair skipped (reproduction unsuccessful)\n",
    "    return img \n",
    "\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['b02972.jpg', 'b02973.jpg', 'b02974.jpg', 'b02975.jpg', 'b02976.jpg', 'b02977.jpg', 'b02978Z.jpg', 'b02979.jpg', 'b02980.jpg', '结果']\n",
      "C:\\Users\\鲁睿\\Desktop\\DIP\\大作业\\甲骨文图片数据\\预处理图像测试\\b02972.jpg\n",
      "[[208 208 209 ... 242 242 240]\n",
      " [208 208 209 ... 242 242 246]\n",
      " [208 208 209 ... 242 242 242]\n",
      " ...\n",
      " [214 214 214 ... 238 241 242]\n",
      " [214 214 214 ... 239 242 242]\n",
      " [214 214 214 ... 240 243 242]]\n",
      "20\n",
      "254\n",
      "29\n",
      "248\n",
      "22\n",
      "255\n",
      "25\n",
      "255\n",
      "23\n",
      "255\n",
      "28\n",
      "255\n",
      "27\n",
      "255\n",
      "25\n",
      "255\n",
      "25\n",
      "250\n"
     ]
    },
    {
     "ename": "PermissionError",
     "evalue": "[Errno 13] Permission denied: 'C:\\\\Users\\\\鲁睿\\\\Desktop\\\\DIP\\\\大作业\\\\甲骨文图片数据\\\\预处理图像测试\\\\结果'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mPermissionError\u001b[0m                           Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[43], line 23\u001b[0m\n\u001b[0;32m     19\u001b[0m pretest_result_path \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mjoin(pretest_results_path,img)\n\u001b[0;32m     20\u001b[0m \u001b[38;5;66;03m# img = np.array(Image.open(pretest_img_path))\u001b[39;00m\n\u001b[0;32m     21\u001b[0m \u001b[38;5;66;03m# if img is None:\u001b[39;00m\n\u001b[0;32m     22\u001b[0m \u001b[38;5;66;03m#     print(\"None\")\u001b[39;00m\n\u001b[1;32m---> 23\u001b[0m img \u001b[38;5;241m=\u001b[39m \u001b[43mImage\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mopen\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpretest_img_path\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     24\u001b[0m img \u001b[38;5;241m=\u001b[39m preprocess(img)\n\u001b[0;32m     25\u001b[0m img \u001b[38;5;241m=\u001b[39m Image\u001b[38;5;241m.\u001b[39mfromarray(img)\n",
      "File \u001b[1;32md:\\Ananocnda\\envs\\Digital_image_processing\\Lib\\site-packages\\PIL\\Image.py:3247\u001b[0m, in \u001b[0;36mopen\u001b[1;34m(fp, mode, formats)\u001b[0m\n\u001b[0;32m   3244\u001b[0m     filename \u001b[38;5;241m=\u001b[39m fp\n\u001b[0;32m   3246\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m filename:\n\u001b[1;32m-> 3247\u001b[0m     fp \u001b[38;5;241m=\u001b[39m \u001b[43mbuiltins\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mopen\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfilename\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrb\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m   3248\u001b[0m     exclusive_fp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[0;32m   3250\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n",
      "\u001b[1;31mPermissionError\u001b[0m: [Errno 13] Permission denied: 'C:\\\\Users\\\\鲁睿\\\\Desktop\\\\DIP\\\\大作业\\\\甲骨文图片数据\\\\预处理图像测试\\\\结果'"
     ]
    }
   ],
   "source": [
    "import cv2\n",
    "# NOTE(review): hardcoded absolute Windows paths — consider a configurable DATA_DIR.\n",
    "pretest_imgs_path = \"C:\\\\Users\\\\鲁睿\\\\Desktop\\\\DIP\\\\大作业\\\\甲骨文图片数据\\\\预处理图像测试\"\n",
    "pretest_results_path = \"C:\\\\Users\\\\鲁睿\\\\Desktop\\\\DIP\\\\大作业\\\\甲骨文图片数据\\\\预处理图像测试\\\\结果\"\n",
    "# BUG FIX: keep only .jpg files — os.listdir also returned the results\n",
    "# sub-folder ('结果'), and Image.open on that directory raised the\n",
    "# PermissionError recorded in this cell's output.\n",
    "pretest_imgs = [img for img in os.listdir(pretest_imgs_path) if img.lower().endswith('.jpg')]\n",
    "print(pretest_imgs)\n",
    "print(os.path.join(pretest_imgs_path,pretest_imgs[0]))\n",
    "\n",
    "# Quick look at the raw pixel values of one sample image\n",
    "matrix1 = np.array(Image.open(os.path.join(pretest_imgs_path,pretest_imgs[1])))\n",
    "print(matrix1)\n",
    "\n",
    "# Run the preprocessing pipeline on every test image and save the result\n",
    "for name in pretest_imgs:\n",
    "    pretest_img_path = os.path.join(pretest_imgs_path,name)\n",
    "    pretest_result_path = os.path.join(pretest_results_path,name)\n",
    "    img = Image.open(pretest_img_path)\n",
    "    img = preprocess(img)\n",
    "    Image.fromarray(img).save(pretest_result_path)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def preprocess_folder(src_folder, dst_folder):\n",
    "    \"\"\"Preprocess every image in src_folder and save it under the same name in dst_folder.\"\"\"\n",
    "    for name in os.listdir(src_folder):\n",
    "        img = Image.open(os.path.join(src_folder, name))\n",
    "        img = preprocess(img)\n",
    "        Image.fromarray(img).save(os.path.join(dst_folder, name))\n",
    "\n",
    "imgpre_result_train = \"\"\n",
    "imgpre_result_val = \"\"\n",
    "imgpre_result_test = \"\"\n",
    "\n",
    "# The same loop was copy-pasted three times; one helper replaces all of them\n",
    "# (same processing order as before: train, test, val).\n",
    "preprocess_folder(img_train_folder, imgpre_result_train)\n",
    "preprocess_folder(img_test_folder, imgpre_result_test)\n",
    "preprocess_folder(img_val_folder, imgpre_result_val)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'img_name': 'b02519Z', 'ann': [[64.0, 194.0, 134.0, 277.0, 1.0], [150.0, 168.0, 204.0, 344.0, 1.0], [206.0, 170.0, 259.0, 349.0, 1.0]]}\n",
      "<class 'list'>\n"
     ]
    }
   ],
   "source": [
    "# Write the annotations from the json files into txt files\n",
    "import json \n",
    "labelpre_train_folder = \"\"\n",
    "labelpre_val_folder = \"\"\n",
    "labelpre_test_folder = \"\"\n",
    "\n",
    "label_train_folder = \"\"\n",
    "label_val_folder = \"\"\n",
    "label_test_folder = \"\"\n",
    "\n",
    "# Inspect the structure of one annotation file\n",
    "labelpre_train_files = os.listdir(labelpre_train_folder)\n",
    "test_file_path = os.path.join(labelpre_train_folder,labelpre_train_files[0])\n",
    "# BUG FIX: the file handle was opened but never closed; a context manager\n",
    "# releases it deterministically.\n",
    "with open(test_file_path,'r',encoding='utf-8') as file:\n",
    "    data = json.load(file)\n",
    "print(data)\n",
    "print(type(data['ann']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def json2txt(label_source_folder,label_target_folder):\n",
    "    \"\"\"Convert every annotation json in label_source_folder to a txt file.\n",
    "\n",
    "    Each box in data['ann'] is written as one comma-separated line with the\n",
    "    trailing class label dropped, e.g. '64.0,194.0,134.0,277.0'.\n",
    "    \"\"\"\n",
    "    json_names = os.listdir(label_source_folder)\n",
    "    for name in json_names:\n",
    "        file_path = os.path.join(label_source_folder,name)\n",
    "        with open(file_path,'r',encoding='utf-8') as json_file:\n",
    "            data = json.load(json_file)\n",
    "        txt_name = data['img_name'] + '.txt'\n",
    "        txt_file_path = os.path.join(label_target_folder,txt_name)\n",
    "        with open(txt_file_path,'w',encoding='utf-8') as txt_file:\n",
    "            for anns in data['ann']:\n",
    "                anns = anns[:-1]  # drop the trailing class label\n",
    "                # BUG FIX: str.join requires strings — map(float, ...) raised\n",
    "                # the recorded TypeError; map(str, ...) yields '64.0'-style fields.\n",
    "                txt_file.write(','.join(map(str,anns))+'\\n')\n",
    "    return 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "sequence item 0: expected str instance, float found",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[18], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[43mjson2txt\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlabelpre_train_folder\u001b[49m\u001b[43m,\u001b[49m\u001b[43mlabel_train_folder\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m      2\u001b[0m json2txt(labelpre_test_folder,label_test_folder)\n\u001b[0;32m      3\u001b[0m json2txt(labelpre_val_folder,label_val_folder)\n",
      "Cell \u001b[1;32mIn[17], line 14\u001b[0m, in \u001b[0;36mjson2txt\u001b[1;34m(label_source_folder, label_target_folder)\u001b[0m\n\u001b[0;32m     12\u001b[0m             \u001b[38;5;28;01mfor\u001b[39;00m ann \u001b[38;5;129;01min\u001b[39;00m anns:\n\u001b[0;32m     13\u001b[0m                 ann \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mfloat\u001b[39m(ann)\n\u001b[1;32m---> 14\u001b[0m             txt_file\u001b[38;5;241m.\u001b[39mwrite(\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m,\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mjoin\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mmap\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mfloat\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43manns\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m+\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m     15\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;241m0\u001b[39m\n",
      "\u001b[1;31mTypeError\u001b[0m: sequence item 0: expected str instance, float found"
     ]
    }
   ],
   "source": [
    "# Convert all three splits from json to txt labels.\n",
    "# NOTE(review): this cell's recorded output shows a TypeError raised inside\n",
    "# json2txt (map(float, ...) passed to str.join).\n",
    "json2txt(labelpre_train_folder,label_train_folder)\n",
    "json2txt(labelpre_test_folder,label_test_folder)\n",
    "json2txt(labelpre_val_folder,label_val_folder)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "64.0,194.0,134.0,277.0\n",
      "150.0,168.0,204.0,344.0\n",
      "206.0,170.0,259.0,349.0\n",
      "\n",
      "<class 'str'>\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# Sanity check: read back the first converted txt label file\n",
    "train_label = os.listdir(label_train_folder)\n",
    "with open(os.path.join(label_train_folder,train_label[0]),'r',encoding='utf-8') as txt:\n",
    "    txt_file = txt.read()\n",
    "print(txt_file)\n",
    "print(type(txt_file))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Digital_image_processing",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
