{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torchvision import datasets, transforms\n",
    "from torch.utils.data import DataLoader"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "对比学习的网络可以同时接受两个输入，这种网络被形象地称为Siamese（孪生）网络。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们设计一个可以解决图像问题的CNN网络。这个convnet模块可以帮助模型逐层提取图像的边缘、纹理、形状等特征，为后续的全连接层提供高层次的特征表示。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SiameseNetwork(nn.Module):  \n",
    "    def __init__(self):  \n",
    "        super(SiameseNetwork, self).__init__()  \n",
    "        self.convnet = nn.Sequential(  \n",
    "            nn.Conv2d(1, 64, 10),  \n",
    "            # ReLU激活函数 可以有效克服Sigmoid激活函数的梯度消失问题\n",
    "            nn.ReLU(inplace=True),  \n",
    "            nn.MaxPool2d(2),  \n",
    "            nn.Conv2d(64, 128, 7),  \n",
    "            nn.ReLU(inplace=True),  \n",
    "            nn.MaxPool2d(2),  \n",
    "            nn.Conv2d(128, 128, 4),  \n",
    "            nn.ReLU(inplace=True),  \n",
    "            nn.MaxPool2d(2),  \n",
    "            nn.Conv2d(128, 256, 4),  \n",
    "            nn.ReLU(inplace=True)  \n",
    "        )  \n",
    "  \n",
    "        self.fc = nn.Sequential(  \n",
    "            nn.Linear(256, 4096),  \n",
    "            nn.Sigmoid()  \n",
    "        )  \n",
    "  \n",
    "    def forward_one(self, x):  \n",
    "        x = self.convnet(x)  \n",
    "        x = x.view(x.size(0), -1)  # flatten the tensor  \n",
    "        x = self.fc(x)  \n",
    "        return x  \n",
    "  \n",
    "    def forward(self, input1, input2):  \n",
    "        output1 = self.forward_one(input1)  \n",
    "        output2 = self.forward_one(input2)  \n",
    "        return output1, output2  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize the network\n",
    "net = SiameseNetwork()\n",
    "\n",
    "# Optimizer\n",
    "optimizer = optim.Adam(net.parameters(), lr=0.001)\n",
    "\n",
    "# Load the MNIST dataset\n",
    "transform = transforms.Compose([transforms.ToTensor()])\n",
    "train_dataset = datasets.MNIST(root='./data', train=True, transform=transform, download=True)\n",
    "train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n",
    "\n",
    "\n",
    "def contrastive_loss(out1, out2, target, margin=1.0):\n",
    "    \"\"\"Contrastive loss (Hadsell, Chopra & LeCun, 2006).\n",
    "\n",
    "    target is 1 for a positive (same-class) pair and 0 for a negative pair.\n",
    "    Positive pairs are pulled together; negative pairs are pushed to be at\n",
    "    least `margin` apart in embedding space.\n",
    "    \"\"\"\n",
    "    dist = (out1 - out2).pow(2).sum(dim=1).sqrt()\n",
    "    pos = target * dist.pow(2)\n",
    "    neg = (1 - target) * torch.clamp(margin - dist, min=0).pow(2)\n",
    "    return (pos + neg).mean()\n",
    "\n",
    "\n",
    "# Contrastive training loop.\n",
    "# NOTE: a MNIST DataLoader yields (images, labels) 2-tuples, so pairs must be\n",
    "# built here: split each shuffled batch in half and let the labels decide\n",
    "# whether each pair is positive (same digit) or negative (different digits).\n",
    "num_epochs = 5\n",
    "for epoch in range(num_epochs):\n",
    "    for i, (images, labels) in enumerate(train_loader):\n",
    "        half = images.size(0) // 2\n",
    "        img1, img2 = images[:half], images[half:2 * half]\n",
    "        target = (labels[:half] == labels[half:2 * half]).float()\n",
    "\n",
    "        # Forward pass through the shared-weight network\n",
    "        output1, output2 = net(img1, img2)\n",
    "\n",
    "        # Compute the loss\n",
    "        loss = contrastive_loss(output1, output2, target)\n",
    "\n",
    "        # Backward pass and optimization\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        if (i + 1) % 100 == 0:\n",
    "            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(\n",
    "                epoch + 1, num_epochs, i + 1, len(train_loader), loss.item()))\n",
    "\n",
    "print('Training finished.')"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
