{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "import zipfile\n",
    "import requests\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torchvision import transforms, models\n",
    "from PIL import Image\n",
    "from tqdm import tqdm\n",
    "import nltk\n",
    "from nltk.tokenize import word_tokenize\n",
    "from collections import Counter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NLTK >= 3.8.2 splits the tokenizer data: word_tokenize needs 'punkt_tab' too\n",
    "nltk.download('punkt')\n",
    "nltk.download('punkt_tab')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Dataset download and preparation\n",
    "def _download_file(url, dest_path, progress_msg):\n",
    "    \"\"\"Stream-download url to dest_path with a tqdm progress bar.\n",
    "\n",
    "    Does nothing if dest_path already exists.\n",
    "    \"\"\"\n",
    "    if os.path.exists(dest_path):\n",
    "        return\n",
    "    print(progress_msg)\n",
    "    response = requests.get(url, stream=True)\n",
    "    response.raise_for_status()  # fail fast instead of saving an HTTP error page as a zip\n",
    "    total_size = int(response.headers.get('content-length', 0))\n",
    "    \n",
    "    with open(dest_path, 'wb') as f, tqdm(\n",
    "        desc=dest_path,\n",
    "        total=total_size,\n",
    "        unit='B',\n",
    "        unit_scale=True,\n",
    "        unit_divisor=1024,\n",
    "    ) as bar:\n",
    "        for data in response.iter_content(chunk_size=1024):\n",
    "            if data:\n",
    "                f.write(data)\n",
    "                bar.update(len(data))\n",
    "\n",
    "def download_dataset(data_dir='flickr8k'):\n",
    "    \"\"\"Download and unpack the Flickr8k images and caption annotations.\n",
    "\n",
    "    Returns:\n",
    "        (data_dir, images_dir, captions_file) paths.\n",
    "    \"\"\"\n",
    "    os.makedirs(data_dir, exist_ok=True)\n",
    "    \n",
    "    # Image archive\n",
    "    images_url = \"https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_Dataset.zip\"\n",
    "    images_zip = os.path.join(data_dir, \"Flickr8k_Dataset.zip\")\n",
    "    _download_file(images_url, images_zip, \"下载图像数据...\")\n",
    "    \n",
    "    # NOTE: the upstream zip extracts to the (misspelled) 'Flicker8k_Dataset' folder\n",
    "    images_dir = os.path.join(data_dir, \"Flicker8k_Dataset\")\n",
    "    if not os.path.exists(images_dir):\n",
    "        print(\"解压图像数据...\")\n",
    "        with zipfile.ZipFile(images_zip, 'r') as zip_ref:\n",
    "            zip_ref.extractall(data_dir)\n",
    "    \n",
    "    # Caption archive\n",
    "    captions_url = \"https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_text.zip\"\n",
    "    captions_zip = os.path.join(data_dir, \"Flickr8k_text.zip\")\n",
    "    _download_file(captions_url, captions_zip, \"下载标注数据...\")\n",
    "    \n",
    "    captions_file = os.path.join(data_dir, \"Flickr8k.token.txt\")\n",
    "    if not os.path.exists(captions_file):\n",
    "        print(\"解压标注数据...\")\n",
    "        with zipfile.ZipFile(captions_zip, 'r') as zip_ref:\n",
    "            zip_ref.extractall(data_dir)\n",
    "    \n",
    "    print(\"数据集准备完成!\")\n",
    "    return data_dir, images_dir, captions_file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Text processing helper\n",
    "class Vocabulary:\n",
    "    \"\"\"Bidirectional word <-> id mapping with reserved special tokens.\"\"\"\n",
    "\n",
    "    def __init__(self, freq_threshold=5):\n",
    "        self.freq_threshold = freq_threshold\n",
    "        # ids 0-3 are reserved for the special tokens\n",
    "        self.itos = {0: \"<PAD>\", 1: \"<SOS>\", 2: \"<EOS>\", 3: \"<UNK>\"}\n",
    "        self.stoi = {\"<PAD>\": 0, \"<SOS>\": 1, \"<EOS>\": 2, \"<UNK>\": 3}\n",
    "        self.freq = Counter()\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.itos)\n",
    "\n",
    "    @staticmethod\n",
    "    def tokenize(text):\n",
    "        \"\"\"Lower-case, strip punctuation, then split into NLTK word tokens.\"\"\"\n",
    "        cleaned = re.sub(r'[^\\w\\s]', '', text.lower())\n",
    "        return word_tokenize(cleaned)\n",
    "\n",
    "    def build_vocabulary(self, caption_list):\n",
    "        \"\"\"Count tokens over all captions; index every word meeting the frequency threshold.\"\"\"\n",
    "        for caption in caption_list:\n",
    "            self.freq.update(self.tokenize(caption))\n",
    "\n",
    "        next_idx = 4  # continue numbering after the reserved ids\n",
    "        for word, count in self.freq.items():\n",
    "            if count >= self.freq_threshold:\n",
    "                self.itos[next_idx] = word\n",
    "                self.stoi[word] = next_idx\n",
    "                next_idx += 1\n",
    "\n",
    "    def numericalize(self, text):\n",
    "        \"\"\"Map text to a list of token ids, sending out-of-vocabulary words to <UNK>.\"\"\"\n",
    "        unk_id = self.stoi[\"<UNK>\"]\n",
    "        return [self.stoi.get(token, unk_id) for token in self.tokenize(text)]\n",
    "\n",
    "# Image-caption dataset\n",
    "class Flickr8kDataset(Dataset):\n",
    "    \"\"\"Flickr8k dataset yielding (image_tensor, caption_id_tensor) pairs.\n",
    "\n",
    "    Each image has several reference captions; __getitem__ samples one at random.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, images_dir, captions_file, transform=None, freq_threshold=5):\n",
    "        self.images_dir = images_dir\n",
    "        self.transform = transform\n",
    "        \n",
    "        # Read the annotation file; each line looks like \"<image>#<n>\\t<caption>\"\n",
    "        with open(captions_file, 'r') as f:\n",
    "            captions_data = f.readlines()\n",
    "        \n",
    "        self.captions = []\n",
    "        self.image_ids = []\n",
    "        \n",
    "        for line in captions_data:\n",
    "            line = line.strip()\n",
    "            if not line:\n",
    "                continue\n",
    "            \n",
    "            parts = line.split('\\t')\n",
    "            # BUGFIX: skip malformed lines without a tab instead of raising IndexError\n",
    "            if len(parts) < 2:\n",
    "                continue\n",
    "            \n",
    "            self.image_ids.append(parts[0].split('#')[0])\n",
    "            self.captions.append(parts[1])\n",
    "        \n",
    "        # Build the vocabulary from all captions\n",
    "        self.vocab = Vocabulary(freq_threshold)\n",
    "        self.vocab.build_vocabulary(self.captions)\n",
    "        \n",
    "        # Map each image id to the indices of all of its captions\n",
    "        self.image_id_to_indices = {}\n",
    "        for idx, image_id in enumerate(self.image_ids):\n",
    "            self.image_id_to_indices.setdefault(image_id, []).append(idx)\n",
    "        \n",
    "        # Unique image ids define the dataset length\n",
    "        self.unique_image_ids = list(self.image_id_to_indices.keys())\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.unique_image_ids)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        image_id = self.unique_image_ids[idx]\n",
    "        \n",
    "        # Load and (optionally) transform the image\n",
    "        img_path = os.path.join(self.images_dir, image_id)\n",
    "        image = Image.open(img_path).convert(\"RGB\")\n",
    "        if self.transform:\n",
    "            image = self.transform(image)\n",
    "        \n",
    "        # Randomly pick one of this image's reference captions\n",
    "        caption_idx = np.random.choice(self.image_id_to_indices[image_id])\n",
    "        caption = self.captions[caption_idx]\n",
    "        \n",
    "        # Wrap the numericalized caption with <SOS>/<EOS>\n",
    "        numericalized_caption = [self.vocab.stoi[\"<SOS>\"]]\n",
    "        numericalized_caption += self.vocab.numericalize(caption)\n",
    "        numericalized_caption.append(self.vocab.stoi[\"<EOS>\"])\n",
    "        \n",
    "        return image, torch.tensor(numericalized_caption)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 用于填充批次的函数\n",
    "class CapsCollate:\n",
    "    def __init__(self, pad_idx, batch_first=True):\n",
    "        self.pad_idx = pad_idx\n",
    "        self.batch_first = batch_first\n",
    "    \n",
    "    def __call__(self, batch):\n",
    "        imgs = [item[0].unsqueeze(0) for item in batch]\n",
    "        imgs = torch.cat(imgs, dim=0)\n",
    "        \n",
    "        targets = [item[1] for item in batch]\n",
    "        targets = nn.utils.rnn.pad_sequence(\n",
    "            targets, batch_first=self.batch_first, padding_value=self.pad_idx\n",
    "        )\n",
    "        \n",
    "        return imgs, targets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Image encoder\n",
    "class EncoderCNN(nn.Module):\n",
    "    \"\"\"Encode an image into a fixed-size embedding with a pretrained ResNet18 backbone.\"\"\"\n",
    "\n",
    "    def __init__(self, embed_size, train_CNN=False):\n",
    "        super(EncoderCNN, self).__init__()\n",
    "        self.train_CNN = train_CNN\n",
    "        \n",
    "        # Pretrained ResNet18 with its final FC layer removed\n",
    "        resnet = models.resnet18(pretrained=True)\n",
    "        modules = list(resnet.children())[:-1]\n",
    "        self.resnet = nn.Sequential(*modules)\n",
    "        \n",
    "        # Project the pooled CNN features into the embedding space\n",
    "        self.embed = nn.Linear(resnet.fc.in_features, embed_size)\n",
    "        self.bn = nn.BatchNorm1d(embed_size)\n",
    "        \n",
    "        # Freeze (or unfreeze) the backbone parameters\n",
    "        self.fine_tune()\n",
    "    \n",
    "    def forward(self, images):\n",
    "        # BUGFIX: the original wrapped this in torch.no_grad() unconditionally,\n",
    "        # which silently blocked fine-tuning even when train_CNN=True. Enable\n",
    "        # gradients for the backbone only when fine-tuning is requested, and\n",
    "        # never re-enable them inside an outer no_grad context.\n",
    "        with torch.set_grad_enabled(self.train_CNN and torch.is_grad_enabled()):\n",
    "            features = self.resnet(images)\n",
    "        \n",
    "        # Flatten (B, C, 1, 1) -> (B, C), then map into the embedding space\n",
    "        features = features.view(features.size(0), -1)\n",
    "        features = self.embed(features)\n",
    "        features = self.bn(features)\n",
    "        \n",
    "        return features\n",
    "    \n",
    "    def fine_tune(self):\n",
    "        \"\"\"Enable or disable gradient updates for the ResNet backbone.\"\"\"\n",
    "        for param in self.resnet.parameters():\n",
    "            param.requires_grad = self.train_CNN\n",
    "\n",
    "# 文本解码器\n",
    "class DecoderRNN(nn.Module):\n",
    "    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):\n",
    "        super(DecoderRNN, self).__init__()\n",
    "        \n",
    "        # 词嵌入层\n",
    "        self.embed = nn.Embedding(vocab_size, embed_size)\n",
    "        \n",
    "        # LSTM层\n",
    "        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n",
    "        \n",
    "        # 全连接层，从隐藏状态到词汇表\n",
    "        self.linear = nn.Linear(hidden_size, vocab_size)\n",
    "        \n",
    "        # 初始化权重\n",
    "        self.init_weights()\n",
    "    \n",
    "    def init_weights(self):\n",
    "        \"\"\"初始化可学习的权重\"\"\"\n",
    "        self.embed.weight.data.uniform_(-0.1, 0.1)\n",
    "        self.linear.weight.data.uniform_(-0.1, 0.1)\n",
    "        self.linear.bias.data.fill_(0)\n",
    "    \n",
    "    def forward(self, features, captions):\n",
    "        \"\"\"\n",
    "        前向传播\n",
    "        \n",
    "        参数:\n",
    "            features: 图像特征，形状为 (batch_size, embed_size)\n",
    "            captions: 文本标注，形状为 (batch_size, caption_length)\n",
    "        \n",
    "        返回:\n",
    "            outputs: 预测的词概率，形状为 (batch_size, caption_length-1, vocab_size)\n",
    "        \"\"\"\n",
    "        # 移除最后的标记 (<EOS>)\n",
    "        captions = captions[:, :-1]\n",
    "        \n",
    "        # 词嵌入\n",
    "        embeddings = self.embed(captions)\n",
    "        \n",
    "        # 将图像特征与词嵌入连接\n",
    "        embeddings = torch.cat((features.unsqueeze(1), embeddings), dim=1)\n",
    "        \n",
    "        # LSTM前向传播\n",
    "        hiddens, _ = self.lstm(embeddings)\n",
    "        \n",
    "        # 预测下一个词\n",
    "        outputs = self.linear(hiddens)\n",
    "        \n",
    "        return outputs\n",
    "    \n",
    "    def sample(self, features, max_len=20):\n",
    "        \"\"\"\n",
    "        从图像特征生成文本描述\n",
    "        \n",
    "        参数:\n",
    "            features: 图像特征，形状为 (1, embed_size)\n",
    "            max_len: 生成文本的最大长度\n",
    "        \n",
    "        返回:\n",
    "            sampled_ids: 生成的词索引列表\n",
    "        \"\"\"\n",
    "        sampled_ids = []\n",
    "        inputs = features.unsqueeze(1)  # (1, 1, embed_size)\n",
    "        \n",
    "        for _ in range(max_len):\n",
    "            hiddens, states = self.lstm(inputs)  # hiddens: (1, 1, hidden_size)\n",
    "            outputs = self.linear(hiddens.squeeze(1))  # outputs: (1, vocab_size)\n",
    "            _, predicted = outputs.max(1)  # predicted: (1)\n",
    "            \n",
    "            sampled_ids.append(predicted.item())\n",
    "            \n",
    "            # 如果预测到 <EOS>，则停止生成\n",
    "            if predicted.item() == self.embed.num_embeddings - 1:\n",
    "                break\n",
    "            \n",
    "            # 准备下一个输入\n",
    "            inputs = self.embed(predicted).unsqueeze(1)  # (1, 1, embed_size)\n",
    "        \n",
    "        return sampled_ids\n",
    "\n",
    "# Image captioning model\n",
    "class ImageCaptioningModel(nn.Module):\n",
    "    \"\"\"End-to-end captioning model: CNN encoder + LSTM decoder.\"\"\"\n",
    "\n",
    "    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):\n",
    "        super(ImageCaptioningModel, self).__init__()\n",
    "        self.encoder = EncoderCNN(embed_size)\n",
    "        self.decoder = DecoderRNN(embed_size, hidden_size, vocab_size, num_layers)\n",
    "    \n",
    "    def forward(self, images, captions):\n",
    "        features = self.encoder(images)\n",
    "        outputs = self.decoder(features, captions)\n",
    "        return outputs\n",
    "    \n",
    "    def caption_image(self, image, vocab, max_length=50):\n",
    "        \"\"\"Generate a caption for an image given as a file path or preprocessed tensor.\"\"\"\n",
    "        self.eval()\n",
    "        with torch.no_grad():\n",
    "            if isinstance(image, str):\n",
    "                # Load and preprocess the image from disk\n",
    "                image = Image.open(image).convert(\"RGB\")\n",
    "                transform = transforms.Compose([\n",
    "                    transforms.Resize((224, 224)),\n",
    "                    transforms.ToTensor(),\n",
    "                    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n",
    "                ])\n",
    "                image = transform(image).unsqueeze(0)\n",
    "            \n",
    "            # BUGFIX: keep the input on the same device as the model (the\n",
    "            # original crashed with a device mismatch when the model was on GPU).\n",
    "            image = image.to(next(self.parameters()).device)\n",
    "            \n",
    "            features = self.encoder(image)\n",
    "            sampled_ids = self.decoder.sample(features, max_length)\n",
    "        \n",
    "        # Convert ids back to words, stopping at <EOS>\n",
    "        sampled_caption = []\n",
    "        for word_id in sampled_ids:\n",
    "            word = vocab.itos[word_id]\n",
    "            if word == \"<EOS>\":\n",
    "                break\n",
    "            sampled_caption.append(word)\n",
    "        \n",
    "        # BUGFIX: sample() never emits <SOS>, so the original's [1:-1] slice\n",
    "        # dropped the first real word (and the last word when no <EOS> was\n",
    "        # generated). Filter residual special tokens instead.\n",
    "        words = [w for w in sampled_caption if w not in (\"<SOS>\", \"<PAD>\")]\n",
    "        return \" \".join(words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training loop\n",
    "def train_model(model, train_loader, criterion, optimizer, num_epochs, device):\n",
    "    \"\"\"Train the captioning model, printing the mean loss after every epoch.\"\"\"\n",
    "    model.train()\n",
    "    \n",
    "    for epoch in range(num_epochs):\n",
    "        total_loss = 0.0\n",
    "        \n",
    "        for images, captions in tqdm(train_loader):\n",
    "            # Move the batch to the training device\n",
    "            images, captions = images.to(device), captions.to(device)\n",
    "            \n",
    "            # Forward pass: logits of shape (batch, seq_len, vocab)\n",
    "            logits = model(images, captions)\n",
    "            \n",
    "            # Flatten to (batch*seq_len, vocab) vs (batch*seq_len,) for cross-entropy\n",
    "            loss = criterion(\n",
    "                logits.reshape(-1, logits.shape[2]),\n",
    "                captions.reshape(-1)\n",
    "            )\n",
    "            \n",
    "            # Backpropagate and update the weights\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            \n",
    "            total_loss += loss.item()\n",
    "        \n",
    "        # Report the average loss for this epoch\n",
    "        mean_loss = total_loss / len(train_loader)\n",
    "        print(f\"Epoch {epoch+1}/{num_epochs}, Loss: {mean_loss:.4f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Main entry point\n",
    "def main():\n",
    "    \"\"\"End-to-end pipeline: download data, build loaders, train, save, and demo.\"\"\"\n",
    "    # Select the training device\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    print(f\"使用设备: {device}\")\n",
    "    \n",
    "    # Download the dataset\n",
    "    data_dir, images_dir, captions_file = download_dataset()\n",
    "    \n",
    "    # Image preprocessing (ImageNet normalization to match the pretrained encoder)\n",
    "    transform = transforms.Compose([\n",
    "        transforms.Resize((224, 224)),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n",
    "    ])\n",
    "    \n",
    "    dataset = Flickr8kDataset(images_dir, captions_file, transform=transform)\n",
    "    \n",
    "    # Train/validation split.\n",
    "    # BUGFIX: seed the split generator so the partition is reproducible across runs.\n",
    "    train_size = int(0.8 * len(dataset))\n",
    "    val_size = len(dataset) - train_size\n",
    "    train_dataset, val_dataset = torch.utils.data.random_split(\n",
    "        dataset, [train_size, val_size],\n",
    "        generator=torch.Generator().manual_seed(42)\n",
    "    )\n",
    "    \n",
    "    # Data loaders with caption padding\n",
    "    pad_idx = dataset.vocab.stoi[\"<PAD>\"]\n",
    "    train_loader = DataLoader(\n",
    "        dataset=train_dataset,\n",
    "        batch_size=32,\n",
    "        shuffle=True,\n",
    "        collate_fn=CapsCollate(pad_idx=pad_idx, batch_first=True)\n",
    "    )\n",
    "    \n",
    "    val_loader = DataLoader(\n",
    "        dataset=val_dataset,\n",
    "        batch_size=32,\n",
    "        shuffle=False,\n",
    "        collate_fn=CapsCollate(pad_idx=pad_idx, batch_first=True)\n",
    "    )\n",
    "    \n",
    "    # Hyperparameters\n",
    "    embed_size = 256\n",
    "    hidden_size = 256\n",
    "    vocab_size = len(dataset.vocab)\n",
    "    num_layers = 1\n",
    "    \n",
    "    model = ImageCaptioningModel(embed_size, hidden_size, vocab_size, num_layers).to(device)\n",
    "    \n",
    "    # <PAD> positions are excluded from the loss\n",
    "    criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "    \n",
    "    num_epochs = 5  # increase for real training\n",
    "    train_model(model, train_loader, criterion, optimizer, num_epochs, device)\n",
    "    \n",
    "    # Persist the trained weights\n",
    "    torch.save(model.state_dict(), \"image_captioning_model.pth\")\n",
    "    print(\"模型已保存为 image_captioning_model.pth\")\n",
    "    \n",
    "    # Quick qualitative check on one training image\n",
    "    test_image = os.path.join(images_dir, dataset.unique_image_ids[0])\n",
    "    caption = model.caption_image(test_image, dataset.vocab)\n",
    "    print(f\"图像: {test_image}\")\n",
    "    print(f\"生成的描述: {caption}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
