{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant05/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant05.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant08/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant08.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant03/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant03.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant04/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant04.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant01/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant01.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant06/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant06.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant07/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant07.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant02/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant02.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant09/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant09.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant10/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant10.mp4\n",
      "/data/xusc/exp/topictrack-bee/data/antmove/test/ant11/img1 /data/xusc/exp/topictrack-bee/data/antmove/ant11.mp4\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<__main__.PreDataset at 0x7f0ce0164280>"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "import  os \n",
    "from os.path import join, isdir, isfile, exists\n",
    "from os import makedirs\n",
    "\n",
    "import cv2\n",
    "from glob import glob\n",
    "\n",
    "import json\n",
    "\n",
    "\n",
    "def makedir(path):\n",
    "    \n",
    "    if not exists(path):\n",
    "        makedirs(path)\n",
    "\n",
    "    \n",
    "\n",
    "def extract_frames_from_video(video_path, save_path):\n",
    "    # 确保结果存储路径存在\n",
    "    if not os.path.exists(save_path):\n",
    "        os.makedirs(save_path)\n",
    "    \n",
    "    # 使用OpenCV打开视频文件\n",
    "    video = cv2.VideoCapture(video_path)\n",
    "    \n",
    "    # 逐帧读取并保存图像\n",
    "    frame_count = 1\n",
    "    while True:\n",
    "        success, frame = video.read()\n",
    "        \n",
    "        # 如果成功读取到帧\n",
    "        if success:\n",
    "            # 构建帧图像的保存路径和文件名\n",
    "            frame_filename = f\"%06d.jpg\"%(frame_count)\n",
    "            frame_path = os.path.join(save_path, frame_filename)\n",
    "            \n",
    "            # 保存当前帧图像\n",
    "            cv2.imwrite(frame_path, frame)\n",
    "            \n",
    "            frame_count += 1\n",
    "        else:\n",
    "            break\n",
    "    \n",
    "    \n",
    "    video.release()\n",
    "\n",
    "\n",
    "class PreDataset:\n",
    "\n",
    "    def __init__(self,path):\n",
    "        self.path = path \n",
    "        video_paths = glob(path+\"/*.mp4\")\n",
    "\n",
    "        \n",
    "\n",
    "        test_save_path = join(path,'test')\n",
    "\n",
    "        \n",
    "        for p in video_paths:\n",
    "            video_name = p.split('/')[-1].split('.')[0]\n",
    "            spath = join(test_save_path,video_name,\"img1\")\n",
    "            makedir(spath)\n",
    "            print(spath,p)\n",
    "            extract_frames_from_video(p,spath)\n",
    "            \n",
    "\n",
    "\n",
     "# root = \"/data/xusc/exp/topictrack-bee/data/antmove\" alternative dataset roots:\n",
     "# root = \"/data/xusc/exp/topictrack-bee/data/beedance\"\n",
     "# Dataset root containing the raw *.mp4 videos; frames are written under <root>/test/.\n",
     "root = \"/data/xusc/exp/topictrack-bee/data/antmove\"\n",
     "\n",
     "\n",
     "PreDataset(root)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ant01: 270 images\n",
      "loaded test for 270 images and 0 samples\n"
     ]
    }
   ],
   "source": [
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def convert_coco_format(DATA_PATH = \"/data/xusc/exp/topictrack-bee/data/beedance\",SPLITS = [\"test\"]):\n",
    "\n",
    "    OUT_PATH = os.path.join(DATA_PATH, \"annotations\")\n",
    "    # SPLITS = [\"train\", \"test\"]\n",
    "\n",
    "    if not os.path.exists(OUT_PATH):\n",
    "        os.makedirs(OUT_PATH)\n",
    "\n",
    "    for split in SPLITS:\n",
    "\n",
    "        data_path = os.path.join(DATA_PATH, split)\n",
    "        out_path = os.path.join(OUT_PATH, \"{}.json\".format(split))\n",
    "        out = {\n",
    "            \"images\": [],\n",
    "            \"annotations\": [],\n",
    "            \"videos\": [],\n",
    "            \"categories\": [{\"id\": 1, \"name\": \"dancer\"}],\n",
    "        }\n",
    "        seqs = os.listdir(data_path)\n",
    "        image_cnt = 0\n",
    "        ann_cnt = 0\n",
    "        video_cnt = 0\n",
    "        for seq in sorted(seqs):\n",
    "            if \".DS_Store\" in seq or \".ipy\" in seq:\n",
    "                continue\n",
    "\n",
    "            video_cnt += 1  # video sequence number.\n",
    "            out[\"videos\"].append({\"id\": video_cnt, \"file_name\": seq})\n",
    "            seq_path = os.path.join(data_path, seq)\n",
    "            img_path = os.path.join(seq_path, \"img1\")\n",
    "            ann_path = os.path.join(seq_path, \"gt/gt.txt\")\n",
    "            images = os.listdir(img_path)\n",
    "            num_images = len([image for image in images if \"jpg\" in image])  # half and half\n",
    "\n",
    "            for i in range(num_images):\n",
    "                img = cv2.imread(os.path.join(data_path, \"{}/img1/{:06d}.jpg\".format(seq, i + 1)))\n",
    "                height, width = img.shape[:2]\n",
    "                image_info = {\n",
    "                    \"file_name\": \"{}/img1/{:06d}.jpg\".format(seq, i + 1),  # image name.\n",
    "                    \"id\": image_cnt + i + 1,  # image number in the entire training set.\n",
    "                    \"frame_id\": i + 1,  # image number in the video sequence, starting from 1.\n",
    "                    \"prev_image_id\": image_cnt + i if i > 0 else -1,  # image number in the entire training set.\n",
    "                    \"next_image_id\": image_cnt + i + 2 if i < num_images - 1 else -1,\n",
    "                    \"video_id\": video_cnt,\n",
    "                    \"height\": height,\n",
    "                    \"width\": width,\n",
    "                }\n",
    "                out[\"images\"].append(image_info)\n",
    "            print(\"{}: {} images\".format(seq, num_images))\n",
    "\n",
    "            if split != \"test\":\n",
    "                anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=\",\")\n",
    "                for i in range(anns.shape[0]):\n",
    "                    frame_id = int(anns[i][0])\n",
    "                    track_id = int(anns[i][1])\n",
    "                    cat_id = int(anns[i][7])\n",
    "                    ann_cnt += 1\n",
    "                    category_id = 1\n",
    "                    ann = {\n",
    "                        \"id\": ann_cnt,\n",
    "                        \"category_id\": category_id,\n",
    "                        \"image_id\": image_cnt + frame_id,\n",
    "                        \"track_id\": track_id,\n",
    "                        \"bbox\": anns[i][2:6].tolist(),\n",
    "                        \"conf\": float(anns[i][6]),\n",
    "                        \"iscrowd\": 0,\n",
    "                        \"area\": float(anns[i][4] * anns[i][5]),\n",
    "                    }\n",
    "                    out[\"annotations\"].append(ann)\n",
    "                print(\"{}: {} ann images\".format(seq, int(anns[:, 0].max())))\n",
    "\n",
    "            image_cnt += num_images\n",
    "        print(\"loaded {} for {} images and {} samples\".format(split, len(out[\"images\"]), len(out[\"annotations\"])))\n",
    "        json.dump(out, open(out_path, \"w\"))\n",
    "\n",
     "# Build the COCO-style annotation json for the ant dataset (test split only,\n",
     "# so no gt.txt is read and `annotations` stays empty).\n",
     "convert_coco_format(DATA_PATH='/data/xusc/exp/topictrack-bee/data/antmove')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "beetrack",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
