{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "2e0e38c3",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/dual-4090/miniconda3/envs/llava/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "Loading checkpoint shards: 100%|██████████| 3/3 [00:02<00:00,  1.05it/s]\n",
      "Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.48, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n"
     ]
    }
   ],
   "source": [
    "# Model loading: LLaVA-1.5-7B checkpoint and its processor.\n",
    "import torch\n",
    "from transformers import AutoProcessor, LlavaForConditionalGeneration,LlavaProcessor\n",
    "\n",
    "import os\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"  # expose GPUs 0-3 (the value lists four GPUs, not just 0 and 1); NOTE(review): set after `import torch`, effective only if CUDA is not yet initialized -- confirm\n",
    "\n",
    "# Alternative checkpoint location kept for reference:\n",
    "# model_path=\"/data/VLM/llava-v1.5-7b-hf\"\n",
    "model_path=\"/media/dual-4090/sata1/GuoTianxing/model/VLM/llava-v1.5-7b-hf\"\n",
    "# device_map=\"auto\" shards the model across all visible GPUs.\n",
    "llava_model = LlavaForConditionalGeneration.from_pretrained(\n",
    "    model_path,\n",
    "    torch_dtype=torch.bfloat16,  # bfloat16 weights to halve GPU memory\n",
    "    low_cpu_mem_usage=True,\n",
    "    attn_implementation=\"eager\",  # non-fused attention -- presumably so attention weights can be read out later; confirm\n",
    "    device_map=\"auto\"  # automatically distribute layers over the available GPUs\n",
    ")\n",
    "llava_processor = LlavaProcessor.from_pretrained(model_path, patch_size=14)  # patch_size=14 -- assumed to match the ViT-L/14 vision tower; TODO confirm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "45556df9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 路径加载\n",
    "from pathlib import Path\n",
    "\n",
    "def get_png_files(directory):\n",
    "    path = Path(directory)\n",
    "    return [str(file.resolve()) for file in path.rglob('*.png')]  # 递归匹配.png文件\n",
    "def get_xml_files(directory):\n",
    "    path = Path(directory)\n",
    "    return [str(file.resolve()) for file in path.rglob('*.xml')]  # 递归匹配.xml文件\n",
    "# 示例用法\n",
    "# directory = '/path/to/your/folder'  # 替换为目标文件夹路径\n",
    "# png_files = get_png_files(directory)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "050878f8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Ground-truth loading: rasterize VOC-style boxes into per-class binary masks.\n",
    "import numpy as np\n",
    "from shapely.geometry import Polygon\n",
    "from shapely.ops import unary_union\n",
    "import xml.etree.ElementTree as ET\n",
    "from PIL import Image, ImageDraw\n",
    "\n",
    "def parse_xml_and_calculate_union_areas(xml_content):\n",
    "    \"\"\"Parse a VOC-style annotation XML string into one mask per class.\n",
    "\n",
    "    Every <object> bounding box becomes an axis-aligned rectangle; the\n",
    "    rectangles of each class are merged with a geometric union and then\n",
    "    rasterized (255 inside, 0 outside) onto a canvas of the image size.\n",
    "\n",
    "    Returns: dict mapping class name -> uint8 numpy array (height x width).\n",
    "    \"\"\"\n",
    "    # Parse the XML document from its string form.\n",
    "    root = ET.fromstring(xml_content)\n",
    "\n",
    "    size = root.find('size')\n",
    "    # (width, height) order, as expected by PIL's Image.new below.\n",
    "    image_size=(int(size.find('width').text),int(size.find('height').text))\n",
    " \n",
    "    # Rectangles collected per class name.\n",
    "    class_polygons = {}\n",
    "    \n",
    "    # Walk every annotated object.\n",
    "    for obj in root.findall('object'):\n",
    "        name = obj.find('name').text\n",
    "        bbox = obj.find('bndbox')\n",
    "        # NOTE(review): int() raises on float coordinates (\"12.0\") that some\n",
    "        # VOC exports contain -- confirm this dataset stores integers.\n",
    "        xmin = int(bbox.find('xmin').text)\n",
    "        ymin = int(bbox.find('ymin').text)\n",
    "        xmax = int(bbox.find('xmax').text)\n",
    "        ymax = int(bbox.find('ymax').text)\n",
    "        \n",
    "        # Axis-aligned rectangle for this bounding box.\n",
    "        polygon = Polygon([\n",
    "            (xmin, ymin),\n",
    "            (xmax, ymin),\n",
    "            (xmax, ymax),\n",
    "            (xmin, ymax)\n",
    "        ])\n",
    "        \n",
    "        if name not in class_polygons:\n",
    "            class_polygons[name] = []\n",
    "        class_polygons[name].append(polygon)\n",
    "    \n",
    "    # Union each class's rectangles and rasterize to a numpy mask.\n",
    "    class_masks = {}\n",
    "    for name, polygons in class_polygons.items():\n",
    "        if len(polygons) > 1:\n",
    "            union = unary_union(polygons)\n",
    "        else:\n",
    "            union = polygons[0]\n",
    "        \n",
    "        # Blank grayscale canvas (mode 'L'), initialized to 0.\n",
    "        img = Image.new('L', image_size, 0)\n",
    "        draw = ImageDraw.Draw(img)\n",
    "        \n",
    "        # Fill the union; interior rings (holes) are re-painted with 0.\n",
    "        if union.geom_type == 'Polygon':\n",
    "            exterior = list(union.exterior.coords)\n",
    "            draw.polygon(exterior, fill=255)\n",
    "            for interior in union.interiors:\n",
    "                draw.polygon(list(interior.coords), fill=0)\n",
    "        elif union.geom_type == 'MultiPolygon':\n",
    "            for poly in union.geoms:\n",
    "                exterior = list(poly.exterior.coords)\n",
    "                draw.polygon(exterior, fill=255)\n",
    "                for interior in poly.interiors:\n",
    "                    draw.polygon(list(interior.coords), fill=0)\n",
    "        # NOTE(review): any other geometry type (e.g. a degenerate\n",
    "        # GeometryCollection) is silently skipped, leaving an all-zero mask.\n",
    "        \n",
    "        # Convert the rendered canvas to numpy (values are 0 or 255).\n",
    "        mask = np.array(img)\n",
    "        class_masks[name] = mask\n",
    "    \n",
    "    return class_masks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7e3a0e1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attention-map generation.\n",
    "from llava_methods import *\n",
    "from utils import *\n",
    "\n",
    "def generate_attention_map(model, processor, image_path, object, threshold):\n",
    "    \"\"\"Build a binarized attention mask for `object` in the given image.\n",
    "\n",
    "    Relies on `high_res`, `rel_attention_llava` and `square_array_to_orin`\n",
    "    from the star-imported project modules (llava_methods / utils), plus\n",
    "    `Image`/`np` imported by earlier cells. The parameter name `object`\n",
    "    shadows the builtin; kept as-is to preserve the call signature.\n",
    "\n",
    "    Returns: 2-D numpy array of 0/255 values at the original image size.\n",
    "    \"\"\"\n",
    "    question = f'Find all the {object} in the picture and locate their positions.'\n",
    "    general_question = 'Write a general description of the image.'\n",
    "\n",
    "    prompt = f\"<image>\\nUSER: {question} Answer the question using a single word or phrase.\\nASSISTANT:\"\n",
    "    general_prompt = f\"<image>\\nUSER: {general_question} Answer the question using a single word or phrase.\\nASSISTANT:\"\n",
    "\n",
    "    image = Image.open(image_path).convert(\"RGB\")\n",
    " \n",
    "    # Relative attention at high resolution -- presumably contrasts the object\n",
    "    # prompt against the generic prompt; see llava_methods for details.\n",
    "    att_map = high_res(rel_attention_llava, image, prompt, general_prompt, model, processor)\n",
    "    \n",
    "    # The raw map is square; map it back to the original aspect ratio.\n",
    "    att_map = square_array_to_orin(att_map,image.size)\n",
    "\n",
    "    # Binarize: 255 where attention >= threshold, else 0.\n",
    "    att_map = np.where(att_map >= threshold, 255, 0)\n",
    "    return att_map"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8366519b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 比对注意力图和真值之间的关系\n",
    "def calculate_jaccard(arr1, arr2):\n",
    "    # 转换为布尔数组（非零为True，零为False）\n",
    "    mask1 = arr1 != 0\n",
    "    mask2 = arr2 != 0\n",
    "    \n",
    "    # 计算交集（两个数组同时非零的位置）\n",
    "    intersection = np.logical_and(mask1, mask2)\n",
    "    \n",
    "    # 计算并集（两个数组任意一个非零的位置）\n",
    "    union = np.logical_or(mask1, mask2)\n",
    "    \n",
    "    # 统计非零元素的数量\n",
    "    intersection_count = np.sum(intersection)\n",
    "    union_count = np.sum(union)\n",
    "    \n",
    "    # 计算比值（避免除以零）\n",
    "    jaccard = intersection_count / union_count if union_count > 0 else 0.0\n",
    "    \n",
    "    return jaccard\n",
    "# 使用说明\n",
    "# intersection, union, ratio = calculate_jaccard(arr1, arr2)\n",
    "# print(f\"交集非零元素数: {intersection}\")  # 输出: 2\n",
    "# print(f\"并集非零元素数: {union}\")      # 输出: 5\n",
    "# print(f\"比值 (Jaccard): {ratio:.2f}\")  # 输出: 0.40"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "97e0f575",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 统计平均值，方差和标准差,并保存到json文件中\n",
    "import numpy as np\n",
    "import json\n",
    "from collections import defaultdict\n",
    "\n",
    "def save_dict_to_json(dictionary, file_path):\n",
    "    try:\n",
    "        with open(file_path, 'w', encoding='utf-8') as f:\n",
    "            # indent参数用于格式化输出，使JSON更易读\n",
    "            json.dump(dictionary, f, ensure_ascii=False, indent=4)\n",
    "        print(f\"成功保存到 {file_path}\")\n",
    "    except Exception as e:\n",
    "        print(f\"保存失败: {e}\")\n",
    "\n",
    "def calculate_stats(data_list):\n",
    "    # 初始化存储结构（排除'ID'字段）\n",
    "    categories = ['People', 'Car', 'Bus', 'Motorcycle', 'Lamp', 'Truck']\n",
    "    stats = {\n",
    "        'mean': defaultdict(float),\n",
    "        'variance': defaultdict(float),\n",
    "        'std_dev': defaultdict(float),\n",
    "        'count': defaultdict(int)\n",
    "    }\n",
    "    \n",
    "    # 收集所有有效数值\n",
    "    values_dict = defaultdict(list)\n",
    "    for entry in data_list:\n",
    "        for category in categories:\n",
    "            value = entry.get(category)\n",
    "            if value is not None:  # 忽略None值\n",
    "                values_dict[category].append(value)\n",
    "    \n",
    "    # 计算统计量\n",
    "    for category in categories:\n",
    "        values = values_dict[category]\n",
    "        if values:  # 确保有有效数据\n",
    "            stats['mean'][category] = np.mean(values)\n",
    "            stats['variance'][category] = np.var(values)\n",
    "            stats['std_dev'][category] = np.std(values)\n",
    "            stats['count'][category] = len(values)\n",
    "    \n",
    "    return stats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "e150f4ee",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Processing XML files:   0%|          | 0/200 [00:00<?, ?it/s]You may have used the wrong order for inputs. `images` should be passed before `text`. The `images` and `text` inputs will be swapped. This behavior will be deprecated in transformers v4.47.\n",
      "Processing XML files: 100%|██████████| 200/200 [02:00<00:00,  1.65it/s]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "from tqdm import tqdm\n",
    "# Dataset root (alternative location kept for reference):\n",
    "# PATH=\"/data/dataset/M3FD/M3FD_Detection\"\n",
    "PATH=\"/media/dual-4090/sata1/GuoTianxing/dataset/M3FD/M3FD_Detection\"\n",
    "# chosen selects the modality: 1 = \"ir\", 2 = \"vi\", anything else = fused (combines ir and vi).\n",
    "chosen = 1\n",
    "modality=[\"ir\",\"vi\",\"unit\"]\n",
    "xml_file_list=get_xml_files(PATH)\n",
    "result=[]\n",
    "keys = [\"ID\", \"People\", \"Car\", \"Bus\", \"Motorcycle\", \"Lamp\", \"Truck\"]\n",
    "\n",
    "# NOTE(review): len(...)-4000 caps the run -- presumably the folder holds 4200\n",
    "# XML files so only the first 200 are processed; confirm before a full run.\n",
    "for i in tqdm(range(len(xml_file_list)-4000), desc=\"Processing XML files\"):\n",
    "    result_dict = {key: None for key in keys}\n",
    "    file_name = os.path.basename(xml_file_list[i])  # e.g. \"00001.xml\"\n",
    "    # Strip the extension to get the bare sample ID.\n",
    "    name = os.path.splitext(file_name)[0]  # e.g. \"00001\"\n",
    "    result_dict[\"ID\"]=name\n",
    "    if chosen == 1 or chosen == 2:\n",
    "        image_path = PATH+\"/\"+modality[chosen-1]+\"/\"+name+\".png\"\n",
    "    else:\n",
    "        # Fused mode: use both the infrared and the visible image.\n",
    "        image_path1 = PATH+\"/\"+modality[0]+\"/\"+name+\".png\"\n",
    "        image_path2 = PATH+\"/\"+modality[1]+\"/\"+name+\".png\" \n",
    "    tree = ET.parse(xml_file_list[i])  # annotation file for this sample\n",
    "    root = tree.getroot()\n",
    "    # Serialize the XML back to a string for the mask-building helper.\n",
    "    xml_content = ET.tostring(root, encoding='unicode')\n",
    "    label = parse_xml_and_calculate_union_areas(xml_content)\n",
    "    for item_type, value in label.items():\n",
    "        if chosen == 1 or chosen == 2:\n",
    "            # threshold=1.309 is a magic binarization cutoff -- presumably tuned\n",
    "            # from the attention-value histograms below; confirm.\n",
    "            att_array = generate_attention_map(llava_model,llava_processor,image_path,item_type,threshold=1.309)\n",
    "            result_dict[item_type]=calculate_jaccard(att_array,value)\n",
    "        else:\n",
    "            att_array1 = generate_attention_map(llava_model,llava_processor,image_path1,item_type,threshold=1.309)\n",
    "            att_array2 = generate_attention_map(llava_model,llava_processor,image_path2,item_type,threshold=1.309)\n",
    "            # Fuse by keeping only pixels attended in BOTH modalities.\n",
    "            att_array = np.logical_and(att_array1, att_array2)\n",
    "            result_dict[item_type]=calculate_jaccard(att_array,value)\n",
    "    result.append(result_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a5b8b305",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "People:\n",
      "  平均值: 0.11036035292975484\n",
      "  方差: 0.017879265048103967\n",
      "  标准差: 0.13371336899541483\n",
      "  有效统计数: 141\n",
      "Car:\n",
      "  平均值: 0.30033771654424374\n",
      "  方差: 0.029434992989953574\n",
      "  标准差: 0.171566293280334\n",
      "  有效统计数: 140\n",
      "Bus:\n",
      "  平均值: 0.17692584614818582\n",
      "  方差: 0.03797114573306122\n",
      "  标准差: 0.19486186320843085\n",
      "  有效统计数: 30\n",
      "Motorcycle:\n",
      "  平均值: 0.03727310840583328\n",
      "  方差: 0.006895483449623083\n",
      "  标准差: 0.08303904774034372\n",
      "  有效统计数: 22\n",
      "Lamp:\n",
      "  平均值: 0.007141478774960677\n",
      "  方差: 0.00010145122611036465\n",
      "  标准差: 0.010072299941441609\n",
      "  有效统计数: 34\n",
      "Truck:\n",
      "  平均值: 0.1661376331827999\n",
      "  方差: 0.046877192530145516\n",
      "  标准差: 0.21651141431838072\n",
      "  有效统计数: 26\n",
      "成功保存到 batch_detection_ir.json\n"
     ]
    }
   ],
   "source": [
    "# Print per-category statistics and persist them as a modality-tagged JSON.\n",
    "results = calculate_stats(result)\n",
    "data_dict = {}\n",
    "for category in ('People', 'Car', 'Bus', 'Motorcycle', 'Lamp', 'Truck'):\n",
    "    # Collect the four statistics for this category first ...\n",
    "    summary = {\n",
    "        'mean': results['mean'].get(category, '无数据'),\n",
    "        'variance': results['variance'].get(category, '无数据'),\n",
    "        'std_dev': results['std_dev'].get(category, '无数据'),\n",
    "        'count': results['count'].get(category, 0),\n",
    "    }\n",
    "    # ... then report and store them.\n",
    "    print(f\"{category}:\")\n",
    "    print(f\"  平均值: {summary['mean']}\")\n",
    "    print(f\"  方差: {summary['variance']}\")\n",
    "    print(f\"  标准差: {summary['std_dev']}\")\n",
    "    print(f\"  有效统计数: {summary['count']}\")\n",
    "    data_dict[category] = summary\n",
    "save_dict_to_json(data_dict, f'./result/M3FD/batch_detection_{modality[chosen-1]}.json')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "65249868",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "# Input: an attention map (2-D numpy array).\n",
    "# These helpers chart the distribution of element values, as groundwork for\n",
    "# choosing the binarization mask/threshold.\n",
    "def draw_value_of_array(array):\n",
    "    \"\"\"Show a histogram of all element values, split into 20 equal bins.\"\"\"\n",
    "    bin_edges = np.linspace(array.min(), array.max(), num=21)  # 20 intervals need 21 edge points\n",
    "    plt.hist(array.flatten(), bins=bin_edges, color='orange', edgecolor='black')\n",
    "    plt.xlabel('Value Interval')\n",
    "    plt.ylabel('Num of Pixel')\n",
    "    plt.title('Element numerical distribution (box statistics)')\n",
    "    plt.xticks(bin_edges, rotation=45)  # show every bin edge, rotated to avoid overlap\n",
    "    plt.show()\n",
    "\n",
    "def draw_value_of_arrays(arrays):\n",
    "    \"\"\"Show the average per-bin histogram over a sequence of arrays,\n",
    "    binned on shared edges spanning the global min/max.\"\"\"\n",
    "    # Histogram counts for every array, on the shared bin edges.\n",
    "    all_hist_data = []\n",
    "    \n",
    "    # Global min/max so all arrays share one set of bin boundaries.\n",
    "    global_min = min([arr.min() for arr in arrays])\n",
    "    global_max = max([arr.max() for arr in arrays])\n",
    "    bin_edges = np.linspace(global_min, global_max, num=35)  # 34 intervals (35 edges); the original comment said 20\n",
    "    \n",
    "    # Per-array histogram counts.\n",
    "    for array in arrays:\n",
    "        hist, _ = np.histogram(array.flatten(), bins=bin_edges)\n",
    "        all_hist_data.append(hist)\n",
    "    \n",
    "    # Mean pixel count per bin across all arrays.\n",
    "    avg_hist = np.mean(all_hist_data, axis=0)\n",
    "    \n",
    "    # Bar chart positioned at the bin mid-points.\n",
    "    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2\n",
    "    plt.bar(bin_centers, avg_hist, width=np.diff(bin_edges), color='orange', edgecolor='black')\n",
    "    \n",
    "    plt.xlabel('Value Interval')\n",
    "    plt.ylabel('Avg Num of Pixel')\n",
    "    plt.title('Average Element Numerical Distribution (Box Statistics)')\n",
    "    plt.xticks(bin_edges, rotation=45)\n",
    "    plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llava",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
