

# Dataset download and preparation
def _download_file(url, dest_path):
    """Stream *url* to *dest_path* with a tqdm progress bar.

    Raises requests.HTTPError on a bad HTTP status instead of silently
    saving an error page as the archive.
    """
    response = requests.get(url, stream=True)
    response.raise_for_status()  # fail fast on 4xx/5xx
    total_size = int(response.headers.get('content-length', 0))

    with open(dest_path, 'wb') as f, tqdm(
        desc=dest_path,
        total=total_size,
        unit='B',
        unit_scale=True,
        unit_divisor=1024,
    ) as bar:
        for data in response.iter_content(chunk_size=1024):
            if data:  # skip keep-alive chunks
                f.write(data)
                bar.update(len(data))


def _extract_zip(zip_path, target_dir):
    """Extract *zip_path* into *target_dir*."""
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        zip_ref.extractall(target_dir)


def download_dataset(data_dir='flickr8k'):
    """Download and unpack the Flickr8k dataset (images + captions).

    Parameters:
        data_dir: directory the archives are downloaded to and extracted in.

    Returns:
        (data_dir, images_dir, captions_file) — paths to the dataset root,
        the extracted image directory, and the caption annotation file.
    """
    os.makedirs(data_dir, exist_ok=True)

    # Image archive.
    images_url = "https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_Dataset.zip"
    images_zip = os.path.join(data_dir, "Flickr8k_Dataset.zip")
    if not os.path.exists(images_zip):
        print("下载图像数据...")
        _download_file(images_url, images_zip)

    # Extract images. NOTE: "Flicker8k_Dataset" (sic) is the folder name
    # inside the official archive.
    images_dir = os.path.join(data_dir, "Flicker8k_Dataset")
    if not os.path.exists(images_dir):
        print("解压图像数据...")
        _extract_zip(images_zip, data_dir)

    # Caption archive.
    captions_url = "https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_text.zip"
    captions_zip = os.path.join(data_dir, "Flickr8k_text.zip")
    if not os.path.exists(captions_zip):
        print("下载标注数据...")
        _download_file(captions_url, captions_zip)

    # Extract captions.
    captions_file = os.path.join(data_dir, "Flickr8k.token.txt")
    if not os.path.exists(captions_file):
        print("解压标注数据...")
        _extract_zip(captions_zip, data_dir)

    print("数据集准备完成!")
    return data_dir, images_dir, captions_file

# Text-processing utility class
class Vocabulary:
    """Word-level vocabulary for captions with reserved special tokens."""

    def __init__(self, freq_threshold=5):
        # Words seen fewer than freq_threshold times are mapped to <UNK>.
        self.freq_threshold = freq_threshold
        self.itos = {0: "<PAD>", 1: "<SOS>", 2: "<EOS>", 3: "<UNK>"}
        self.stoi = {token: index for index, token in self.itos.items()}
        self.freq = Counter()

    def __len__(self):
        return len(self.itos)

    @staticmethod
    def tokenize(text):
        """Lower-case, strip punctuation, then split into word tokens."""
        cleaned = re.sub(r'[^\w\s]', '', text.lower())
        return word_tokenize(cleaned)

    def build_vocabulary(self, caption_list):
        """Count tokens over all captions and index the frequent ones."""
        for caption in caption_list:
            self.freq.update(self.tokenize(caption))

        # Indices 0-3 are reserved for the special tokens above.
        next_index = 4
        for word, count in self.freq.items():
            if count >= self.freq_threshold:
                self.itos[next_index] = word
                self.stoi[word] = next_index
                next_index += 1

    def numericalize(self, text):
        """Map *text* to token indices; out-of-vocabulary words become <UNK>."""
        unk_index = self.stoi["<UNK>"]
        return [self.stoi.get(token, unk_index) for token in self.tokenize(text)]

# Image-caption dataset
class Flickr8kDataset(Dataset):
    """Flickr8k captioning dataset.

    Each item is (image_tensor, caption_tensor) where the caption is drawn
    at random among the (typically 5) annotations of that image, wrapped in
    <SOS>/<EOS> markers.
    """

    def __init__(self, images_dir, captions_file, transform=None, freq_threshold=5):
        """
        Parameters:
            images_dir: directory containing the Flickr8k image files.
            captions_file: path to Flickr8k.token.txt
                (lines formatted "image.jpg#n<TAB>caption").
            transform: optional transform applied to each loaded image.
            freq_threshold: minimum word frequency for the vocabulary.
        """
        self.images_dir = images_dir
        self.transform = transform

        # Read the annotation file; explicit utf-8 keeps parsing
        # platform-independent.
        with open(captions_file, 'r', encoding='utf-8') as f:
            captions_data = f.readlines()

        self.captions = []
        self.image_ids = []

        for line in captions_data:
            line = line.strip()
            # Skip blank or malformed lines (the original code raised
            # IndexError on any line without a tab separator).
            if not line or '\t' not in line:
                continue

            # maxsplit=1 keeps a caption intact even if it contains tabs.
            image_ref, caption = line.split('\t', 1)
            # "image.jpg#0" -> "image.jpg" (drop the per-image caption index)
            image_id = image_ref.split('#')[0]

            self.image_ids.append(image_id)
            self.captions.append(caption)

        # Build the vocabulary from all captions.
        self.vocab = Vocabulary(freq_threshold)
        self.vocab.build_vocabulary(self.captions)

        # Map each image id to the caption indices that belong to it.
        self.image_id_to_indices = {}
        for idx, image_id in enumerate(self.image_ids):
            self.image_id_to_indices.setdefault(image_id, []).append(idx)

        # Deduplicated image ids, in first-seen order.
        self.unique_image_ids = list(self.image_id_to_indices)

    def __len__(self):
        # One item per unique image, not per caption.
        return len(self.unique_image_ids)

    def __getitem__(self, idx):
        image_id = self.unique_image_ids[idx]

        # Load the image from disk.
        img_path = os.path.join(self.images_dir, image_id)
        image = Image.open(img_path).convert("RGB")

        if self.transform:
            image = self.transform(image)

        # Pick one of this image's captions at random (simple augmentation).
        caption_indices = self.image_id_to_indices[image_id]
        caption_idx = np.random.choice(caption_indices)
        caption = self.captions[caption_idx]

        # Numericalize as [<SOS>, w_0, ..., w_{T-1}, <EOS>].
        numericalized_caption = [self.vocab.stoi["<SOS>"]]
        numericalized_caption += self.vocab.numericalize(caption)
        numericalized_caption.append(self.vocab.stoi["<EOS>"])

        return image, torch.tensor(numericalized_caption)

# Collator that pads batches
class CapsCollate:
    """Batch collator: stacks images and pads captions to a common length."""

    def __init__(self, pad_idx, batch_first=True):
        # Index used to pad short captions (should be the vocab's <PAD>).
        self.pad_idx = pad_idx
        self.batch_first = batch_first

    def __call__(self, batch):
        # Stack images along a new batch dimension.
        images = torch.stack([sample[0] for sample in batch], dim=0)

        # Pad variable-length caption tensors with pad_idx.
        captions = nn.utils.rnn.pad_sequence(
            [sample[1] for sample in batch],
            batch_first=self.batch_first,
            padding_value=self.pad_idx,
        )

        return images, captions

# Image encoder
class EncoderCNN(nn.Module):
    """Image encoder: a pretrained ResNet18 backbone followed by a linear
    projection into the caption embedding space."""

    def __init__(self, embed_size, train_CNN=False):
        """
        Parameters:
            embed_size: dimension of the output image embedding.
            train_CNN: if True, the ResNet backbone is fine-tuned too;
                otherwise only the projection head is trained.
        """
        super(EncoderCNN, self).__init__()
        self.train_CNN = train_CNN

        # Load pretrained ResNet18.
        resnet = models.resnet18(pretrained=True)

        # Drop the final fully-connected classification layer, keeping the
        # global-average-pooled features.
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)

        # Project CNN features into the embedding space.
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)
        self.bn = nn.BatchNorm1d(embed_size)

        # Freeze (or unfreeze) the backbone according to train_CNN.
        self.fine_tune()

    def forward(self, images):
        # BUG FIX: the previous version ran the backbone under an
        # unconditional torch.no_grad(), which blocked gradients even when
        # train_CNN=True, making fine_tune() a no-op. The requires_grad
        # flags set in fine_tune() already keep frozen parameters from
        # being updated.
        features = self.resnet(images)

        # (batch, C, 1, 1) -> (batch, C)
        features = features.view(features.size(0), -1)

        # Map to the embedding space and normalize.
        # NOTE(review): BatchNorm1d in train mode requires batch size > 1.
        features = self.embed(features)
        features = self.bn(features)

        return features

    def fine_tune(self):
        """Enable or disable gradient updates for the ResNet backbone."""
        for param in self.resnet.parameters():
            param.requires_grad = self.train_CNN

# Text decoder
class DecoderRNN(nn.Module):
    """LSTM decoder that generates caption tokens from an image embedding."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        """
        Parameters:
            embed_size: word/image embedding dimension.
            hidden_size: LSTM hidden-state dimension.
            vocab_size: number of words in the vocabulary.
            num_layers: number of stacked LSTM layers.
        """
        super(DecoderRNN, self).__init__()

        # Word embedding layer.
        self.embed = nn.Embedding(vocab_size, embed_size)

        # LSTM over [image feature, word embeddings].
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)

        # Hidden state -> vocabulary logits.
        self.linear = nn.Linear(hidden_size, vocab_size)

        # Initialize learnable weights.
        self.init_weights()

    def init_weights(self):
        """Initialize learnable weights with small uniform values."""
        self.embed.weight.data.uniform_(-0.1, 0.1)
        self.linear.weight.data.uniform_(-0.1, 0.1)
        self.linear.bias.data.fill_(0)

    def forward(self, features, captions):
        """Teacher-forced forward pass.

        Parameters:
            features: image features, shape (batch_size, embed_size).
            captions: caption token indices, shape (batch_size, caption_length).

        Returns:
            outputs: per-step vocabulary logits, shape
                (batch_size, caption_length, vocab_size).
                (Prepending the image feature and dropping the last token
                keeps the sequence length equal to caption_length; the old
                docstring said caption_length-1, which was wrong.)
        """
        # Drop the final token so inputs and targets stay aligned:
        # inputs are [features, w_0 .. w_{T-2}].
        captions = captions[:, :-1]

        # Word embeddings.
        embeddings = self.embed(captions)

        # Prepend the image feature as the first "token".
        embeddings = torch.cat((features.unsqueeze(1), embeddings), dim=1)

        # LSTM forward pass.
        hiddens, _ = self.lstm(embeddings)

        # Predict the next word at every step.
        outputs = self.linear(hiddens)

        return outputs

    def sample(self, features, max_len=20, end_token=2):
        """Greedily generate a caption from image features.

        Parameters:
            features: image features, shape (1, embed_size).
            max_len: maximum number of tokens to generate.
            end_token: vocabulary index of <EOS>; generation stops once it
                is emitted. Default 2 matches this file's Vocabulary.

        Returns:
            sampled_ids: list of generated token indices.
        """
        sampled_ids = []
        inputs = features.unsqueeze(1)  # (1, 1, embed_size)
        states = None  # LSTM starts from a zero state

        for _ in range(max_len):
            # BUG FIX: carry the LSTM state across steps. The previous
            # version re-ran the LSTM from a zero state every iteration,
            # so the decoder had no memory of earlier tokens.
            hiddens, states = self.lstm(inputs, states)  # hiddens: (1, 1, hidden_size)
            outputs = self.linear(hiddens.squeeze(1))    # (1, vocab_size)
            _, predicted = outputs.max(1)                # (1,)

            sampled_ids.append(predicted.item())

            # BUG FIX: stop on the actual <EOS> index. The previous check
            # compared against num_embeddings - 1 (the last vocabulary
            # index), which is an ordinary word, not <EOS>.
            if predicted.item() == end_token:
                break

            # Feed the predicted token back in as the next input.
            inputs = self.embed(predicted).unsqueeze(1)  # (1, 1, embed_size)

        return sampled_ids

# Image-captioning model
class ImageCaptioningModel(nn.Module):
    """Encoder-decoder captioning model: CNN image encoder + LSTM decoder."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        super(ImageCaptioningModel, self).__init__()
        self.encoder = EncoderCNN(embed_size)
        self.decoder = DecoderRNN(embed_size, hidden_size, vocab_size, num_layers)

    def forward(self, images, captions):
        """Teacher-forced forward pass; returns per-step vocabulary logits."""
        features = self.encoder(images)
        outputs = self.decoder(features, captions)
        return outputs

    def caption_image(self, image, vocab, max_length=50):
        """Generate a text description for *image*.

        Parameters:
            image: either a file path (str) or an already-preprocessed
                image tensor of shape (1, 3, 224, 224).
            vocab: Vocabulary providing itos for index-to-word lookup.
            max_length: maximum number of tokens to generate.

        Returns:
            The generated sentence as a single string.
        """
        was_training = self.training
        self.eval()
        try:
            with torch.no_grad():
                if isinstance(image, str):
                    # Load and preprocess from a file path.
                    image = Image.open(image).convert("RGB")
                    transform = transforms.Compose([
                        transforms.Resize((224, 224)),
                        transforms.ToTensor(),
                        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
                    ])
                    image = transform(image).unsqueeze(0)

                # BUG FIX: move the input onto the model's device. A
                # path-loaded image was previously left on the CPU, which
                # crashes when the model lives on CUDA (as in main()).
                device = next(self.parameters()).device
                image = image.to(device)

                features = self.encoder(image)
                sampled_ids = self.decoder.sample(features, max_length)
        finally:
            # BUG FIX: restore the caller's train/eval mode instead of
            # leaving the model permanently in eval mode.
            self.train(was_training)

        # Convert indices back to words, stopping at <EOS>.
        sampled_caption = []
        for word_id in sampled_ids:
            word = vocab.itos[word_id]
            sampled_caption.append(word)
            if word == "<EOS>":
                break

        # Drop the leading <SOS> and trailing <EOS>.
        # NOTE(review): assumes the first sampled token is <SOS>; if the
        # decoder emits a content word first, this drops it — confirm.
        sentence = " ".join(sampled_caption[1:-1])

        return sentence

# Training function
def train_model(model, train_loader, criterion, optimizer, num_epochs, device):
    """Train the captioning model, printing the mean loss after each epoch."""
    model.train()

    for epoch in range(num_epochs):
        total_loss = 0.0

        for images, captions in tqdm(train_loader):
            # Move the batch to the training device.
            images, captions = images.to(device), captions.to(device)

            # Teacher-forced forward pass.
            logits = model(images, captions)

            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CE loss.
            vocab_size = logits.shape[2]
            loss = criterion(logits.reshape(-1, vocab_size), captions.reshape(-1))

            # Backpropagate and update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        # Report the average batch loss for this epoch.
        epoch_loss = total_loss / len(train_loader)
        print(f"Epoch {epoch+1}/{num_epochs}, Loss: {epoch_loss:.4f}")

# Main function
def main():
    """Entry point: prepare data, train the captioning model, demo a caption."""
    # Pick the training device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Fetch and unpack the dataset.
    data_dir, images_dir, captions_file = download_dataset()

    # Standard ImageNet preprocessing for the ResNet encoder.
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # Build the dataset (also builds the vocabulary).
    dataset = Flickr8kDataset(images_dir, captions_file, transform=transform)

    # 80/20 train/validation split.
    train_size = int(0.8 * len(dataset))
    split_sizes = [train_size, len(dataset) - train_size]
    train_dataset, val_dataset = torch.utils.data.random_split(dataset, split_sizes)

    # Data loaders with caption padding.
    pad_idx = dataset.vocab.stoi["<PAD>"]
    collate_fn = CapsCollate(pad_idx=pad_idx, batch_first=True)
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=32,
        shuffle=True,
        collate_fn=collate_fn,
    )
    val_loader = DataLoader(
        dataset=val_dataset,
        batch_size=32,
        shuffle=False,
        collate_fn=collate_fn,
    )

    # Model hyperparameters.
    embed_size = 256
    hidden_size = 256
    num_layers = 1
    vocab_size = len(dataset.vocab)

    # Build the model.
    model = ImageCaptioningModel(embed_size, hidden_size, vocab_size, num_layers).to(device)

    # Cross-entropy over the vocabulary, ignoring padded positions.
    criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train (real training likely needs more epochs than this demo).
    num_epochs = 5
    train_model(model, train_loader, criterion, optimizer, num_epochs, device)

    # Persist the trained weights.
    torch.save(model.state_dict(), "image_captioning_model.pth")
    print("模型已保存为 image_captioning_model.pth")

    # Caption the first image in the dataset as a sanity check.
    test_image = os.path.join(images_dir, dataset.unique_image_ids[0])
    caption = model.caption_image(test_image, dataset.vocab)
    print(f"图像: {test_image}")
    print(f"生成的描述: {caption}")

# Script entry point.
if __name__ == "__main__":
    main()
