{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-03-15T14:40:28.783775Z",
     "start_time": "2025-03-15T14:40:28.776843Z"
    }
   },
   "source": [
    "import os\n",
    "import h5py\n",
    "import json\n",
    "import cv2\n",
    "import torch\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "from typing import Callable\n",
    "from transformers import BertTokenizer, BertModel\n",
    "from torch.utils.data import Dataset"
   ],
   "outputs": [],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-15T14:40:37.406756Z",
     "start_time": "2025-03-15T14:40:29.844227Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Pretrained model identifier (Chinese BERT from the Hugging Face hub)\n",
     "W_MODEL_NAME = 'bert-base-chinese'\n",
     "\n",
     "# Load tokenizer and model; output_hidden_states=True makes every\n",
     "# hidden-state layer available so get_embeddings() can select one.\n",
     "W_tokenizer = BertTokenizer.from_pretrained(W_MODEL_NAME)\n",
     "W_model = BertModel.from_pretrained(W_MODEL_NAME, output_hidden_states=True)\n",
     "# Inference only: switch off dropout\n",
     "W_model.eval()"
   ],
   "id": "8db64039982edc4e",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BertModel(\n",
       "  (embeddings): BertEmbeddings(\n",
       "    (word_embeddings): Embedding(21128, 768, padding_idx=0)\n",
       "    (position_embeddings): Embedding(512, 768)\n",
       "    (token_type_embeddings): Embedding(2, 768)\n",
       "    (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "    (dropout): Dropout(p=0.1, inplace=False)\n",
       "  )\n",
       "  (encoder): BertEncoder(\n",
       "    (layer): ModuleList(\n",
       "      (0-11): 12 x BertLayer(\n",
       "        (attention): BertAttention(\n",
       "          (self): BertSdpaSelfAttention(\n",
       "            (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "            (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "            (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "          (output): BertSelfOutput(\n",
       "            (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (intermediate): BertIntermediate(\n",
       "          (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          (intermediate_act_fn): GELUActivation()\n",
       "        )\n",
       "        (output): BertOutput(\n",
       "          (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "          (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "          (dropout): Dropout(p=0.1, inplace=False)\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (pooler): BertPooler(\n",
       "    (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "    (activation): Tanh()\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-15T14:40:40.136242Z",
     "start_time": "2025-03-15T14:40:40.125617Z"
    }
   },
   "cell_type": "code",
   "source": [
     "def get_embeddings(sentences, layer=-2):\n",
     "    \"\"\"Encode text into sentence embeddings via mask-weighted mean pooling.\n",
     "\n",
     "    Args:\n",
     "        sentences: str or list of str, passed directly to the tokenizer.\n",
     "        layer: index into ``outputs.hidden_states``; the default -2 picks\n",
     "            the second-to-last encoder layer.\n",
     "\n",
     "    Returns:\n",
     "        torch.Tensor of shape (batch_size, hidden_dim).\n",
     "    \"\"\"\n",
     "    # Tokenize: pad to the longest sequence in the batch, truncate at 512\n",
     "    encoded_input = W_tokenizer(\n",
     "        sentences,\n",
     "        padding=True,\n",
     "        truncation=True,\n",
     "        max_length=512,\n",
     "        return_tensors='pt'\n",
     "    )\n",
     "    \n",
     "    # Forward pass without building the autograd graph\n",
     "    with torch.no_grad():\n",
     "        outputs = W_model(**encoded_input)\n",
     "    \n",
     "    # Select the requested hidden-state layer\n",
     "    hidden_states = outputs.hidden_states\n",
     "    selected_layer = hidden_states[layer]  # shape: (batch_size, seq_len, hidden_dim)\n",
     "    \n",
     "    # Expand the attention mask so it broadcasts over the hidden dimension\n",
     "    attention_mask = encoded_input['attention_mask'].unsqueeze(-1)\n",
     "    \n",
     "    # Mean pooling weighted by the mask (padding tokens contribute zero);\n",
     "    # the clamp avoids division by zero for all-padding rows\n",
     "    sum_embeddings = torch.sum(selected_layer * attention_mask, dim=1)\n",
     "    sum_mask = torch.clamp(attention_mask.sum(dim=1), min=1e-9)\n",
     "    sentence_embeddings = sum_embeddings / sum_mask\n",
     "    \n",
     "    return sentence_embeddings    "
   ],
   "id": "6c5bae076ce1ed0",
   "outputs": [],
   "execution_count": 14
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-15T14:40:40.947309Z",
     "start_time": "2025-03-15T14:40:40.938398Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def process_list(slist):\n",
    "    nlist = [get_embeddings(s) for s in slist]\n",
    "    \n",
    "    return torch.stack(nlist, dim=0)"
   ],
   "id": "2889baefe1eafe5",
   "outputs": [],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-15T14:43:56.083079Z",
     "start_time": "2025-03-15T14:43:56.073640Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def image_processing_pipeline(img_path: str) -> torch.Tensor:\n",
    "    \n",
    "    img = cv2.imread(img_path)\n",
    "    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n",
    "    tensor = torch.from_numpy(img.astype(np.float32))\n",
    "    tensor = tensor.unsqueeze(0)  # 添加通道维度 (H,W) -> (C,H,W)\n",
    "    tensor /= 255.0  # 归一化\n",
    "    return tensor"
   ],
   "id": "84a89a9f1b2ae5bf",
   "outputs": [],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-15T14:48:41.009911Z",
     "start_time": "2025-03-15T14:48:40.989716Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class MultimodalProcessor:\n",
    "    \"\"\"改进版多模态处理器\"\"\"\n",
    "    def __init__(self, root_dir: str, text_processor: Callable[[list], torch.Tensor]):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            root_dir: 数据根目录\n",
    "            text_processor: 用户自定义文本处理函数\n",
    "        \"\"\"\n",
    "        self.root_dir = root_dir\n",
    "        self.text_processor = text_processor\n",
    "        self.dataset_path = os.path.join(root_dir, \"multimodal_dataset3.h5\")\n",
    "        \n",
    "    def _process_image(self, img_path: str) -> torch.Tensor:\n",
    "        \"\"\"改进图像处理流程（添加尺寸校验）\"\"\"\n",
    "        tensor = image_processing_pipeline(img_path)\n",
    "        # 统一尺寸为512x512（参考网页5形状调整方法）\n",
    "        if tensor.shape[-2:] != (512, 512):\n",
    "            tensor = torch.nn.functional.interpolate(\n",
    "                tensor.unsqueeze(0), size=(512,512), \n",
    "                mode='bilinear', align_corners=False\n",
    "            ).squeeze(0)\n",
    "        return tensor\n",
    "\n",
    "    def _process_folder(self, subdir: str) -> dict:\n",
    "        \"\"\"处理单个子文件夹\"\"\"\n",
    "        dir_path = os.path.join(self.root_dir, subdir)\n",
    "        #print(dir_path)\n",
    "        \n",
    "        # 文件分类（沿用原模块逻辑）\n",
    "        jpg_files, txt_files = [], []\n",
    "        for f in os.listdir(dir_path):\n",
    "            path = os.path.join(dir_path, f)\n",
    "            #print(path)\n",
    "            if f.endswith(\".txt\"): txt_files.append(path)\n",
    "            elif f.lower().endswith(\".jpg\"): jpg_files.append(path)\n",
    "        \n",
    "        if txt_files == []:\n",
    "            return {}\n",
    "        # 图像处理（改进尺寸处理）\n",
    "        img_tensors = [self._process_image(p) for p in sorted(jpg_files)[:2]]\n",
    "        \n",
    "        # 文本处理（新增维度校验）\n",
    "        with open(txt_files[0], 'r',encoding='utf-8') as f:\n",
    "            text_data = json.load(f)\n",
    "        text_tensor = self.text_processor(list(text_data.values()))\n",
    "        text_tensor.squeeze_(1)\n",
    "        #print(text_tensor.shape)\n",
    "        \n",
    "        if text_tensor.shape != (10, 768):\n",
    "            return {}\n",
    "            #text_tensor = text_tensor.view(10, 768)  # 形状强制对齐（参考网页5）\n",
    "        \n",
    "        return {\n",
    "            \"images\": torch.stack(img_tensors),\n",
    "            \"text\": text_tensor,\n",
    "            \"folder\": subdir\n",
    "        }\n",
    "\n",
    "    def build_dataset(self, save_path: str = \"C:/Users/Lenovo/Desktop/DC/dataset/multimodal_dataset3.h5\"):\n",
    "        \"\"\"构建数据集并保存到指定路径（改进版）\"\"\"\n",
    "        # 默认保存路径为根目录下的数据集文件\n",
    "        default_path = os.path.join(self.root_dir, \"multimodal_dataset.h5\")\n",
    "        final_save_path = save_path if save_path else default_path\n",
    "        \n",
    "        # 创建目标目录（参考网页1路径校验逻辑）\n",
    "        target_dir = os.path.dirname(final_save_path)\n",
    "        if not os.path.exists(target_dir):\n",
    "            os.makedirs(target_dir, exist_ok=True)  # 自动创建多级目录\n",
    "        \n",
    "        # 构建HDF5数据集（参考网页6数据存储策略）\n",
    "        with h5py.File(final_save_path, 'w') as hf:\n",
    "            for subdir in os.listdir(self.root_dir):\n",
    "                dir_path = os.path.join(self.root_dir, subdir)\n",
    "                if not os.path.isdir(dir_path):\n",
    "                    continue\n",
    "                \n",
    "                data = self._process_folder(subdir)\n",
    "                if data == {}:\n",
    "                    continue\n",
    "                group = hf.create_group(f\"sample_{subdir}\")\n",
    "                \n",
    "                # 存储数据（参考网页7元数据标注方法）\n",
    "                group.create_dataset(\"image1\", data=data[\"images\"][0].numpy(),\n",
    "                                   compression=\"gzip\", chunks=True)\n",
    "                group.create_dataset(\"image2\", data=data[\"images\"][1].numpy(),\n",
    "                                   compression=\"gzip\", chunks=True)\n",
    "                group.create_dataset(\"text\", data=data[\"text\"].numpy(),\n",
    "                                   compression=\"gzip\", chunks=True)\n",
    "                group.attrs[\"folder\"] = data[\"folder\"]  # 附加元数据"
   ],
   "id": "dab4d3f649104b37",
   "outputs": [],
   "execution_count": 22
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-15T14:52:24.546623Z",
     "start_time": "2025-03-15T14:48:41.643307Z"
    }
   },
   "cell_type": "code",
   "source": [
    "pro = MultimodalProcessor(\"C:\\\\Users\\\\Lenovo\\\\Desktop\\\\DC\\\\request\\\\2\", process_list)\n",
    "pro.build_dataset()"
   ],
   "id": "b01bf6ef591d8a17",
   "outputs": [],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-15T14:37:30.153687Z",
     "start_time": "2025-03-15T14:37:30.152650Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class MultimodalDataset(Dataset):\n",
    "    \"\"\"可扩展数据集加载器（参考网页6分割策略）\"\"\"\n",
    "    def __init__(self, h5_path: str):\n",
    "        self.h5 = h5py.File(h5_path, 'r')\n",
    "        self.keys = list(self.h5.keys())\n",
    "        \n",
    "    def __len__(self):\n",
    "        return len(self.keys)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        group = self.h5[self.keys[idx]]\n",
    "        return {\n",
    "            \"images\": (\n",
    "                torch.from_numpy(group[\"image1\"][:]),\n",
    "                torch.from_numpy(group[\"image2\"][:])\n",
    "            ),\n",
    "            \"text\": torch.from_numpy(group[\"text\"][:]),\n",
    "            \"meta\": {\"folder\": group.attrs[\"folder\"]}\n",
    "        }"
   ],
   "id": "cb2dd367d3ceb5a6",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "4f5580cca8f9c288"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
