{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "73005c69-18e7-4118-abdd-c11bd76f9a1f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import os.path as osp\n",
    "import shutil\n",
    "\n",
    "import numpy as np\n",
    "import librosa\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "8d890cc3-c69a-41c5-88e1-b08d0b081b35",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "9a78048a-0dd1-4fae-b839-7f4da85c90f6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Dataset layout: raw ASVspoof2019 LA training audio plus the protocol file\n",
    "# labelling each utterance as bonafide or spoofed; Pos/Neg are the sorted\n",
    "# output folders produced by the split cell below.\n",
    "data_path = './datasets'\n",
    "raw_data = './datasets/ASVspoof2019_LA_train/flac'\n",
    "label_path = './datasets/Labels/ASVspoof2019_LA_cm_protocols/ASVspoof2019.LA.cm.train.trn.txt'\n",
    "pos_path = './datasets/Pos'\n",
    "neg_path = './datasets/Neg'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6cbe770b-38e6-4f5a-bf4e-e86ffc0ac33d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Sort the raw training audio into Pos (bonafide) / Neg (spoofed) folders\n",
    "# according to the ASVspoof2019 protocol file.\n",
    "with open(label_path, 'r') as fp:\n",
    "    labels = fp.readlines()\n",
    "\n",
    "os.makedirs(pos_path, exist_ok=True)\n",
    "os.makedirs(neg_path, exist_ok=True)\n",
    "for label in labels:\n",
    "    # Protocol line format: <speaker> <file_id> <-> <attack_id> <bonafide|spoof>\n",
    "    fields = label.strip().split(' ')\n",
    "    f_name = fields[1] + '.flac'\n",
    "    status = fields[4]\n",
    "    dst = pos_path if status == 'bonafide' else neg_path\n",
    "    # Bug fix: the source dir variable is `raw_data` (raw_path was undefined).\n",
    "    shutil.move(os.path.join(raw_data, f_name), os.path.join(dst, f_name))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "ed1ae5bf-9301-4fdd-b00d-f6432c34596c",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Preprocesser:\n",
    "    def __init__(self, interval=5):\n",
    "        self.audio = None\n",
    "        self.sr = 44100\n",
    "        self.interval = interval #5秒\n",
    "\n",
    "    def load_audio(self, audio_path):\n",
    "        self.audio, sr = librosa.load(audio_path, sr=None)\n",
    "        if sr != self.sr:\n",
    "            self.audio = librosa.resample(y = self.audio, orig_sr=sr, target_sr=self.sr)\n",
    "        self.num_sample = self.interval * self.sr\n",
    "        return self.audio, self.sr\n",
    "\n",
    "    #音频切片\n",
    "    def cut_audio(self, audio):\n",
    "        during = len(audio) // self.sr\n",
    "        num_cuts = np.ceil(during / self.interval)\n",
    "        pad_len = int(num_cuts * self.num_sample)\n",
    "\n",
    "        # 如果音频长度比填充长度长2.5秒以上,就多填充一段\n",
    "        if (len(audio) - pad_len) > self.num_sample // 2:\n",
    "            num_cuts += 1\n",
    "            pad_len = int(num_cuts * self.num_sample)\n",
    "            audio = np.pad(self.audio, (0, pad_len), mode='constant')\n",
    "        #如果音频长度比填充长度长2.5秒以下，且音频比填充段长，直接截取\n",
    "        elif (len(audio) - pad_len) < self.num_sample // 2 and len(audio) > pad_len:\n",
    "            audio = audio[:pad_len]\n",
    "            #如果没有填充长度长，就填充\n",
    "        else:\n",
    "            audio = np.pad(self.audio, (0, pad_len), mode='constant')\n",
    "\n",
    "        # 返回切片好的音频\n",
    "        audio_clip = np.array_split(audio, num_cuts)\n",
    "        return audio_clip\n",
    "    def adjust_length(self, audio, target_length):\n",
    "            if len(audio) < target_length:\n",
    "                audio = np.pad(audio, (0, target_length - len(audio)), 'constant')\n",
    "            elif len(audio) > target_length:\n",
    "                audio = audio[:target_length]\n",
    "            return audio\n",
    "\n",
    "    #音频分帧\n",
    "    def frame_audio(self, audio, frame_size, hop_size):\n",
    "        num_samples = len(audio)\n",
    "        num_frames = 1 + int(np.ceil((num_samples - frame_size) / hop_size))\n",
    "    \n",
    "        # 添加零填充，确保音频信号可以被完全分帧\n",
    "        pad_length = num_frames * hop_size + frame_size - num_samples\n",
    "        audio = np.pad(audio, (0, pad_length), mode='constant')\n",
    "    \n",
    "        # 计算每个帧的起始和终止索引\n",
    "        indices = np.tile(np.arange(0, frame_size), (num_frames, 1)) + np.arange(0, num_frames * hop_size, hop_size).reshape(-1, 1)\n",
    "    \n",
    "        # 提取每个帧的音频数据\n",
    "        frames = audio[indices]\n",
    "    \n",
    "        return frames\n",
    "\n",
    "    def do_frame(self, audio_clip):\n",
    "        #对每一份切片的音频进行分帧\n",
    "        frame_audio = self.frame_audio(audio_clip, 512, 256)\n",
    "        return frame_audio\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "10f9104a",
   "metadata": {},
   "outputs": [],
   "source": [
    "class AudioDataset(Dataset):\n",
    "    \"\"\"Dataset over the Pos/Neg flac folders.\n",
    "\n",
    "    One-hot labels: [1, 0] = bonafide (real) audio, [0, 1] = spoofed/synthetic.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, path, pos_name, neg_name, train=True, sr=44100):\n",
    "        # NOTE(review): `train` is currently unused; kept for interface compatibility.\n",
    "        self.pos_tar = [osp.join(path, pos_name, i) for i in os.listdir(osp.join(path, pos_name))]\n",
    "        self.neg_tar = [osp.join(path, neg_name, i) for i in os.listdir(osp.join(path, neg_name))]\n",
    "        self.all_file = self.pos_tar + self.neg_tar\n",
    "        self.processer = Preprocesser(interval=5)\n",
    "        self.target_length = sr * self.processer.interval\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        f_name = self.all_file[index]\n",
    "        # all_file is pos_tar followed by neg_tar, so the index alone\n",
    "        # determines the class (O(1) instead of a list-membership scan).\n",
    "        label = [1.0, 0] if index < len(self.pos_tar) else [0, 1.0]\n",
    "        audio, _ = self.processer.load_audio(f_name)\n",
    "        audio = self.processer.adjust_length(audio, self.target_length)\n",
    "        audio = self.processer.do_frame(audio)\n",
    "        return np.array(audio), np.array(label)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.all_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "431b36c6-5d59-426f-83f2-c921e73c8feb",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SincConv1d(nn.Module):\n",
    "    # SincNet-style 1-D convolution whose filters are derived from learnable\n",
    "    # cutoff/bandwidth parameters.\n",
    "    # NOTE(review): currently unused — SincConv below uses a plain nn.Conv1d.\n",
    "    def __init__(self, in_channels, out_channels, kernel_size, sample_rate=44100.0, min_low_hz=50.0, min_band_hz=50.0):\n",
    "        super(SincConv1d, self).__init__()\n",
    "        self.in_channels = in_channels\n",
    "        self.out_channels = out_channels\n",
    "        self.kernel_size = kernel_size\n",
    "        self.sample_rate = sample_rate\n",
    "        self.min_low_hz = min_low_hz\n",
    "        self.min_band_hz = min_band_hz\n",
    "        \n",
    "        # Initialize filter parameters\n",
    "        # NOTE(review): torch.Tensor(out_channels) is uninitialized memory —\n",
    "        # these parameters start with arbitrary values; consider an explicit\n",
    "        # init (e.g. mel-spaced cutoffs as in SincNet) — TODO confirm intent.\n",
    "        self.low_hz = nn.Parameter(torch.Tensor(out_channels).float())\n",
    "        self.band_hz = nn.Parameter(torch.Tensor(out_channels).float())\n",
    "        \n",
    "        # Initialize weights\n",
    "        self.window = torch.hamming_window(kernel_size)\n",
    "        self.n = (torch.arange(0, kernel_size, dtype=torch.float32) - (kernel_size - 1) / 2)\n",
    "        self.kernels = torch.zeros((out_channels, in_channels, kernel_size))  # Updated for in_channels\n",
    "        \n",
    "        self._init_filters()\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # Rebuild the kernels so they track the current values of the\n",
    "        # learnable low_hz/band_hz parameters.\n",
    "        self._init_filters()  # Re-initialize filters for each forward pass\n",
    "        filters = self.get_filters()\n",
    "        return F.conv1d(x, filters, stride=1)\n",
    "        \n",
    "    def _init_filters(self):\n",
    "        # Constrain the low cutoff to be positive and the band to be non-empty.\n",
    "        low = self.min_low_hz + torch.abs(self.low_hz)\n",
    "        high = low + self.min_band_hz + torch.abs(self.band_hz)\n",
    "        band_pass = high - low\n",
    "        # NOTE(review): `high`/`band_pass` are computed but never used below —\n",
    "        # the kernels depend only on `low`, so these act as low-pass rather\n",
    "        # than band-pass filters. Also torch.sinc(x) = sin(pi*x)/(pi*x)\n",
    "        # already includes the pi factor, so the extra 2*np.pi here may be\n",
    "        # unintended — TODO confirm against the SincNet formulation.\n",
    "        \n",
    "        # Calculate filter parameters\n",
    "        self.kernels = torch.mul(torch.sinc(2 * np.pi * low[:, None] * self.n / self.sample_rate), self.window)\n",
    "        self.kernels = torch.div(self.kernels, torch.sum(self.kernels, dim=-1, keepdim=True))  # Normalize\n",
    "        \n",
    "    def get_filters(self):\n",
    "        # Reshape the flat kernels to conv1d weight layout (out, in, kernel).\n",
    "        return self.kernels.view(self.out_channels, self.in_channels, self.kernel_size)  # Updated for in_channels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "fb1b5432-03b2-4d4e-ad89-924a4ccaa709",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Transpose(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "\n",
    "    def forward(self, x, d1, d2):\n",
    "        return x.transpose(d1, d2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a027dd0e-8d35-446b-8aac-c6bead2c07db",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SincConv(nn.Module):\n",
    "    def __init__(self):\n",
    "        #(4, 861, 512)\n",
    "        super(SincConv, self).__init__()\n",
    "        self.k = nn.Parameter(torch.rand(1, 2))\n",
    "        self.conv_layers = nn.Sequential(\n",
    "            # SincConv1d(in_channels=861, out_channels=60, kernel_size=101),\n",
    "            nn.Conv1d(861, 60, kernel_size=101),\n",
    "            nn.MaxPool1d(kernel_size=512 -101 + 1),\n",
    "            nn.ReLU() ,\n",
    "            #输出为(4, 60, 1)\n",
    "        )\n",
    "        self.activation_layers = nn.Sequential(\n",
    "            nn.Linear(1, 1024),\n",
    "            nn.Sigmoid(),\n",
    "            nn.Linear(1024, 1),\n",
    "            nn.BatchNorm1d(60),\n",
    "            nn.Sigmoid(),\n",
    "            # 输出为 (batch_size, channel_size, feature_size) 即 (4, 60, 1)\n",
    "        )\n",
    "        self.proj = nn.Linear(60, 1)\n",
    "\n",
    "    \n",
    "    def forward(self, x):\n",
    "            x1 = self.conv_layers(x)\n",
    "            x2 = self.activation_layers(x1)\n",
    "            y = x1 @ x2.transpose(1, 2)\n",
    "            return y\n",
    "        #(4, 60, 60)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "236b4746-be79-4400-a2b0-381ed7da0183",
   "metadata": {},
   "outputs": [],
   "source": [
    "class FrequencyConv(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.fre_conv = nn.Conv1d(60, 128, kernel_size=1, stride=1)\n",
    "        self.fre_pool = nn.MaxPool1d(kernel_size=1, stride=1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        o1 = self.fre_conv(x) # (4, 128, 60)\n",
    "        y = self.fre_pool(o1)\n",
    "        return y"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4f079fca-2654-4dcc-8511-3f1b7df91333",
   "metadata": {},
   "source": [
    "$O = \\lfloor \\frac{L - D \\times (K - 1) - 1}{S} \\rfloor + 1$ — output length of a 1-D convolution (no padding) with input length $L$, kernel size $K$, dilation $D$, stride $S$."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "30ed78d2-c4a0-4af6-a1f5-8590c4d3dcfa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 输出为(1, 161)\n",
    "class TCN(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.tcn_list = nn.Sequential(\n",
    "            #公式为 (input - kernel_size + (kernel_size - 1) * (dilation - 1))/stride + 1\n",
    "            nn.Conv1d(128, 64, kernel_size=1, dilation=1),\n",
    "            nn.BatchNorm1d(64),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout1d(0.2),\n",
    "            nn.Conv1d(64, 32, kernel_size=1, dilation=2),\n",
    "            nn.BatchNorm1d(32),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout1d(0.2),\n",
    "            nn.Conv1d(32, 16, kernel_size=1, dilation=4),\n",
    "            nn.BatchNorm1d(16),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout1d(0.2),\n",
    "            nn.Conv1d(16, 8, kernel_size=1, dilation=8),\n",
    "            nn.BatchNorm1d(8),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout1d(0.2),\n",
    "            nn.Conv1d(8, 4, kernel_size=1, dilation=16),\n",
    "            nn.BatchNorm1d(4),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout1d(0.2),\n",
    "            nn.Conv1d(4, 2, kernel_size=1, dilation=32),\n",
    "            nn.BatchNorm1d(2),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout1d(0.2),\n",
    "            nn.Conv1d(2, 1, kernel_size=1, dilation=64),\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        for tcn in self.tcn_list:\n",
    "            x = tcn(x)\n",
    "        return x\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "33bea43c-b315-4da3-b59c-00c8604b6889",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Classify(nn.Module):\n",
    "    #交叉熵损失\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.layer = nn.Sequential(\n",
    "            nn.Linear(60, 512),\n",
    "            nn.Dropout(0.2),\n",
    "            nn.Linear(512, 2),\n",
    "            nn.Softmax(dim=2),\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        return self.layer(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "3e4b8f22-0692-4514-a427-cd671a8ab8e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "#Model的输出为一个(1, 2)的向量，表示概率\n",
    "class Model(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.net = nn.Sequential(\n",
    "            SincConv(),\n",
    "            FrequencyConv(),\n",
    "            TCN(),\n",
    "            Classify(),\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        return self.net(x).squeeze(1)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed16358e-bcb3-4612-8912-34e7b0e6121d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class NetLoss(nn.Module):\n",
    "    def __init__(self, ):\n",
    "        super().__init__()\n",
    "    def forward(self, )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 160,
   "id": "d051bbf5-3fc7-4fb9-a071-5100e8204c61",
   "metadata": {},
   "outputs": [],
   "source": [
    "def infer(file, device=None):\n",
    "    \"\"\"Run the trained model on one audio file.\n",
    "\n",
    "    Returns a [p_real, p_fake] probability list averaged over all\n",
    "    interval-second clips of the file.\n",
    "    \"\"\"\n",
    "    if device is None:\n",
    "        device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "    model = Model()\n",
    "    # map_location so a GPU-trained checkpoint also loads on CPU.\n",
    "    model.load_state_dict(torch.load('./models/model_step.pt', map_location=device))\n",
    "    model.to(device)\n",
    "    model.eval()\n",
    "    processer = Preprocesser(interval=5)\n",
    "    # Derive the clip length from the preprocessor instead of reading the\n",
    "    # global `dataset` object (bug: it may not exist at call time).\n",
    "    target_length = processer.sr * processer.interval\n",
    "    audio, _ = processer.load_audio(file)\n",
    "    clips = processer.cut_audio(audio)\n",
    "    results = []\n",
    "    with torch.no_grad():\n",
    "        for aud in clips:\n",
    "            aud = processer.adjust_length(aud, target_length)\n",
    "            aud = processer.do_frame(aud)[np.newaxis, :]\n",
    "            aud = torch.tensor(aud, dtype=torch.float32, device=device)\n",
    "            results.append(model(aud))\n",
    "    prob = torch.mean(torch.concat(results), axis=0)\n",
    "    return prob.cpu().numpy().tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 161,
   "id": "71527910-27f5-465a-93fa-769732878b48",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.11535726487636566, 0.8846427202224731]"
      ]
     },
     "execution_count": 161,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check on one negative (spoofed) sample; expects a high second\n",
    "# (fake) probability.\n",
    "infer(dataset.neg_tar[20])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "id": "cc5d589f-b996-4746-a733-00f7b034a68b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, train_iter, lr=1e-3, epochs=10, device=None):\n",
    "    \"\"\"Train `model` on `train_iter` for `epochs` epochs with AdamW.\n",
    "\n",
    "    The model outputs softmax probabilities and the labels are one-hot\n",
    "    floats, so BCE over the probabilities is used as the loss\n",
    "    (bug fix: the original referenced the non-existent `nn.Cro`).\n",
    "    \"\"\"\n",
    "    if device is None:\n",
    "        device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
    "    model.to(device)\n",
    "    loss_func = nn.BCELoss()\n",
    "    # AdamW: Adam with decoupled weight decay.\n",
    "    opt = torch.optim.AdamW(params=model.parameters(), lr=lr)\n",
    "    for epoch in range(epochs):\n",
    "        with tqdm(train_iter, desc=f\"Epoch {epoch+1}/{epochs}\", unit=\"batch\") as tepoch:\n",
    "            total_loss = 0\n",
    "            for X, y in tepoch:\n",
    "                X, y = X.to(device), y.to(device)\n",
    "                opt.zero_grad()\n",
    "                out = model(X)\n",
    "                loss = loss_func(out.type(torch.float), y.type(torch.float))\n",
    "                loss.backward()\n",
    "                opt.step()\n",
    "                total_loss += loss.item()\n",
    "                # Show the running loss in the progress bar instead of\n",
    "                # printing every batch (keeps the output readable).\n",
    "                tepoch.set_postfix(loss=loss.item())\n",
    "            print(f'total_loss:{total_loss}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "id": "078fcf89-9c10-4368-8fae-ea7db56f1b78",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<All keys matched successfully>"
      ]
     },
     "execution_count": 118,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build the model and resume from the latest checkpoint.\n",
    "model = Model()\n",
    "model.load_state_dict(torch.load('./models/model_step.pt'))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "id": "f58a94d9-1d09-4d66-a7e3-474dfe2deef0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Build the dataset from the Pos/Neg folders and batch it for training.\n",
    "dataset = AudioDataset(data_path, 'Pos', 'Neg', True)\n",
    "trainer = DataLoader(dataset, batch_size=16, shuffle=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "id": "840999b6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:   0%|                                     | 0/51 [00:00<?, ?batch/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1226, 0.8774],\n",
      "        [0.1377, 0.8623],\n",
      "        [0.1600, 0.8400],\n",
      "        [0.0874, 0.9126],\n",
      "        [0.1068, 0.8932],\n",
      "        [0.1291, 0.8709],\n",
      "        [0.0025, 0.9975],\n",
      "        [0.5755, 0.4245],\n",
      "        [0.1039, 0.8961],\n",
      "        [0.0989, 0.9011],\n",
      "        [0.2855, 0.7145],\n",
      "        [0.1163, 0.8837],\n",
      "        [0.1052, 0.8948],\n",
      "        [0.1058, 0.8942],\n",
      "        [0.1230, 0.8770],\n",
      "        [0.1263, 0.8737]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.8068506121635437\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:   2%|▌                            | 1/51 [00:02<01:55,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1638, 0.8362],\n",
      "        [0.1975, 0.8025],\n",
      "        [0.0940, 0.9060],\n",
      "        [0.1047, 0.8953],\n",
      "        [0.0793, 0.9207],\n",
      "        [0.3205, 0.6795],\n",
      "        [0.3034, 0.6966],\n",
      "        [0.9719, 0.0281],\n",
      "        [0.1223, 0.8777],\n",
      "        [0.1608, 0.8392],\n",
      "        [0.9537, 0.0463],\n",
      "        [0.9721, 0.0279],\n",
      "        [0.8035, 0.1965],\n",
      "        [0.1375, 0.8625],\n",
      "        [0.0886, 0.9114],\n",
      "        [0.1551, 0.8449]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7056379318237305\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:   4%|█▏                           | 2/51 [00:04<01:53,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[1.2277e-01, 8.7723e-01],\n",
      "        [3.6026e-01, 6.3974e-01],\n",
      "        [1.3102e-01, 8.6898e-01],\n",
      "        [1.0277e-01, 8.9723e-01],\n",
      "        [1.2211e-01, 8.7789e-01],\n",
      "        [1.3639e-01, 8.6361e-01],\n",
      "        [1.2106e-01, 8.7894e-01],\n",
      "        [1.3110e-01, 8.6890e-01],\n",
      "        [2.2132e-01, 7.7868e-01],\n",
      "        [1.1126e-01, 8.8874e-01],\n",
      "        [8.2412e-02, 9.1759e-01],\n",
      "        [1.8833e-01, 8.1167e-01],\n",
      "        [3.1567e-04, 9.9968e-01],\n",
      "        [7.8680e-02, 9.2132e-01],\n",
      "        [1.2168e-01, 8.7832e-01],\n",
      "        [1.1116e-01, 8.8884e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6159893870353699\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:   6%|█▋                           | 3/51 [00:06<01:51,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[1.0565e-01, 8.9435e-01],\n",
      "        [1.1886e-01, 8.8114e-01],\n",
      "        [7.3766e-02, 9.2623e-01],\n",
      "        [8.6502e-02, 9.1350e-01],\n",
      "        [1.1934e-01, 8.8066e-01],\n",
      "        [1.1211e-01, 8.8789e-01],\n",
      "        [2.6383e-01, 7.3617e-01],\n",
      "        [1.1133e-01, 8.8867e-01],\n",
      "        [1.3571e-01, 8.6429e-01],\n",
      "        [1.0586e-02, 9.8941e-01],\n",
      "        [7.7551e-05, 9.9992e-01],\n",
      "        [8.2609e-02, 9.1739e-01],\n",
      "        [1.1822e-01, 8.8178e-01],\n",
      "        [1.2006e-01, 8.7994e-01],\n",
      "        [1.3765e-01, 8.6235e-01],\n",
      "        [8.0721e-02, 9.1928e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7745147943496704\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:   8%|██▎                          | 4/51 [00:09<01:49,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0010, 0.9990],\n",
      "        [0.1514, 0.8486],\n",
      "        [0.1160, 0.8840],\n",
      "        [0.0891, 0.9109],\n",
      "        [0.0681, 0.9319],\n",
      "        [0.1236, 0.8764],\n",
      "        [0.0213, 0.9787],\n",
      "        [0.1210, 0.8790],\n",
      "        [0.0018, 0.9982],\n",
      "        [0.1467, 0.8533],\n",
      "        [0.1735, 0.8265],\n",
      "        [0.1063, 0.8937],\n",
      "        [0.0245, 0.9755],\n",
      "        [0.0760, 0.9240],\n",
      "        [0.1576, 0.8424],\n",
      "        [0.0718, 0.9282]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.8254649639129639\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  10%|██▊                          | 5/51 [00:11<01:47,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0548, 0.9452],\n",
      "        [0.0785, 0.9215],\n",
      "        [0.0915, 0.9085],\n",
      "        [0.1199, 0.8801],\n",
      "        [0.1186, 0.8814],\n",
      "        [0.0577, 0.9423],\n",
      "        [0.0777, 0.9223],\n",
      "        [0.0793, 0.9207],\n",
      "        [0.0094, 0.9906],\n",
      "        [0.1153, 0.8847],\n",
      "        [0.1978, 0.8022],\n",
      "        [0.1017, 0.8983],\n",
      "        [0.0596, 0.9404],\n",
      "        [0.0582, 0.9418],\n",
      "        [0.1121, 0.8879],\n",
      "        [0.0833, 0.9167]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:1.478004813194275\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  12%|███▍                         | 6/51 [00:13<01:44,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1449, 0.8551],\n",
      "        [0.1306, 0.8694],\n",
      "        [0.0043, 0.9957],\n",
      "        [0.1044, 0.8956],\n",
      "        [0.0875, 0.9125],\n",
      "        [0.1467, 0.8533],\n",
      "        [0.0608, 0.9392],\n",
      "        [0.1618, 0.8382],\n",
      "        [0.0070, 0.9930],\n",
      "        [0.1084, 0.8916],\n",
      "        [0.1171, 0.8829],\n",
      "        [0.1115, 0.8885],\n",
      "        [0.0014, 0.9986],\n",
      "        [0.1311, 0.8689],\n",
      "        [0.1080, 0.8920],\n",
      "        [0.1019, 0.8981]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:1.3855490684509277\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  14%|███▉                         | 7/51 [00:16<01:43,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1491, 0.8509],\n",
      "        [0.1115, 0.8885],\n",
      "        [0.1499, 0.8501],\n",
      "        [0.0379, 0.9621],\n",
      "        [0.0611, 0.9389],\n",
      "        [0.0351, 0.9649],\n",
      "        [0.1394, 0.8606],\n",
      "        [0.1989, 0.8011],\n",
      "        [0.1933, 0.8067],\n",
      "        [0.1387, 0.8613],\n",
      "        [0.1276, 0.8724],\n",
      "        [0.1237, 0.8763],\n",
      "        [0.0099, 0.9901],\n",
      "        [0.3222, 0.6778],\n",
      "        [0.1558, 0.8442],\n",
      "        [0.0769, 0.9231]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7423669099807739\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  16%|████▌                        | 8/51 [00:18<01:42,  2.38s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2162, 0.7838],\n",
      "        [0.2138, 0.7862],\n",
      "        [0.8610, 0.1390],\n",
      "        [0.1473, 0.8527],\n",
      "        [0.2461, 0.7539],\n",
      "        [0.0044, 0.9956],\n",
      "        [0.6584, 0.3416],\n",
      "        [0.2788, 0.7212],\n",
      "        [0.2097, 0.7903],\n",
      "        [0.1583, 0.8417],\n",
      "        [0.1520, 0.8480],\n",
      "        [0.1635, 0.8365],\n",
      "        [0.1488, 0.8512],\n",
      "        [0.1421, 0.8579],\n",
      "        [0.3483, 0.6517],\n",
      "        [0.1491, 0.8509]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.47800177335739136\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  18%|█████                        | 9/51 [00:21<01:39,  2.38s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[5.0797e-01, 4.9203e-01],\n",
      "        [2.0374e-01, 7.9626e-01],\n",
      "        [3.0666e-01, 6.9334e-01],\n",
      "        [1.7150e-01, 8.2850e-01],\n",
      "        [4.3251e-01, 5.6749e-01],\n",
      "        [1.4495e-01, 8.5505e-01],\n",
      "        [1.3819e-01, 8.6181e-01],\n",
      "        [2.5352e-01, 7.4648e-01],\n",
      "        [1.0973e-01, 8.9027e-01],\n",
      "        [2.3782e-01, 7.6218e-01],\n",
      "        [1.7960e-01, 8.2040e-01],\n",
      "        [8.2222e-01, 1.7778e-01],\n",
      "        [2.8659e-01, 7.1341e-01],\n",
      "        [2.0943e-01, 7.9057e-01],\n",
      "        [9.9999e-01, 5.4265e-06],\n",
      "        [1.8365e-01, 8.1635e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:1.3233134746551514\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  20%|█████▍                      | 10/51 [00:23<01:36,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[1.6395e-01, 8.3605e-01],\n",
      "        [3.4892e-01, 6.5108e-01],\n",
      "        [3.4144e-01, 6.5856e-01],\n",
      "        [1.8188e-01, 8.1812e-01],\n",
      "        [1.6861e-01, 8.3139e-01],\n",
      "        [2.0438e-01, 7.9562e-01],\n",
      "        [1.7724e-01, 8.2276e-01],\n",
      "        [1.2648e-01, 8.7352e-01],\n",
      "        [3.2630e-01, 6.7370e-01],\n",
      "        [2.1517e-01, 7.8483e-01],\n",
      "        [1.7139e-01, 8.2861e-01],\n",
      "        [1.2718e-01, 8.7282e-01],\n",
      "        [1.7140e-01, 8.2860e-01],\n",
      "        [3.4921e-01, 6.5079e-01],\n",
      "        [2.9119e-01, 7.0881e-01],\n",
      "        [9.9952e-01, 4.7708e-04]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.42228713631629944\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  22%|██████                      | 11/51 [00:25<01:34,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1361, 0.8639],\n",
      "        [0.0588, 0.9412],\n",
      "        [0.5886, 0.4114],\n",
      "        [0.1191, 0.8809],\n",
      "        [0.3128, 0.6872],\n",
      "        [0.1942, 0.8058],\n",
      "        [0.6772, 0.3228],\n",
      "        [0.3620, 0.6380],\n",
      "        [0.1034, 0.8966],\n",
      "        [0.9865, 0.0135],\n",
      "        [0.5183, 0.4817],\n",
      "        [0.7966, 0.2034],\n",
      "        [0.3925, 0.6075],\n",
      "        [0.4176, 0.5824],\n",
      "        [0.3490, 0.6510],\n",
      "        [0.2801, 0.7199]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5471963286399841\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  24%|██████▌                     | 12/51 [00:28<01:31,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[9.9358e-01, 6.4188e-03],\n",
      "        [1.8954e-01, 8.1046e-01],\n",
      "        [3.3330e-01, 6.6670e-01],\n",
      "        [1.8862e-01, 8.1138e-01],\n",
      "        [2.3083e-01, 7.6917e-01],\n",
      "        [2.3120e-01, 7.6880e-01],\n",
      "        [1.7420e-01, 8.2580e-01],\n",
      "        [1.9708e-01, 8.0292e-01],\n",
      "        [1.7761e-01, 8.2239e-01],\n",
      "        [1.7681e-01, 8.2319e-01],\n",
      "        [1.9957e-01, 8.0043e-01],\n",
      "        [2.0396e-01, 7.9604e-01],\n",
      "        [3.8789e-01, 6.1211e-01],\n",
      "        [1.8434e-01, 8.1566e-01],\n",
      "        [9.9973e-01, 2.6744e-04],\n",
      "        [1.0547e-01, 8.9453e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:1.0455809831619263\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  25%|███████▏                    | 13/51 [00:30<01:28,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1165, 0.8835],\n",
      "        [0.8718, 0.1282],\n",
      "        [0.8690, 0.1310],\n",
      "        [0.1140, 0.8860],\n",
      "        [0.2231, 0.7769],\n",
      "        [0.1245, 0.8755],\n",
      "        [0.0987, 0.9013],\n",
      "        [0.1926, 0.8074],\n",
      "        [0.3804, 0.6196],\n",
      "        [0.9892, 0.0108],\n",
      "        [0.1024, 0.8976],\n",
      "        [0.1581, 0.8419],\n",
      "        [0.1383, 0.8617],\n",
      "        [0.2231, 0.7769],\n",
      "        [0.5418, 0.4582],\n",
      "        [0.1746, 0.8254]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6204839944839478\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  27%|███████▋                    | 14/51 [00:32<01:25,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2665, 0.7335],\n",
      "        [0.1488, 0.8512],\n",
      "        [0.1282, 0.8718],\n",
      "        [0.3418, 0.6582],\n",
      "        [0.3854, 0.6146],\n",
      "        [0.3199, 0.6801],\n",
      "        [0.3796, 0.6204],\n",
      "        [0.3994, 0.6006],\n",
      "        [0.0338, 0.9662],\n",
      "        [0.2490, 0.7510],\n",
      "        [0.3240, 0.6760],\n",
      "        [0.9960, 0.0040],\n",
      "        [0.3048, 0.6952],\n",
      "        [0.5238, 0.4762],\n",
      "        [0.2746, 0.7254],\n",
      "        [0.5197, 0.4803]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5122973322868347\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  29%|████████▏                   | 15/51 [00:35<01:23,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.3673, 0.6327],\n",
      "        [0.0662, 0.9338],\n",
      "        [0.3723, 0.6277],\n",
      "        [0.1475, 0.8525],\n",
      "        [0.3402, 0.6598],\n",
      "        [0.3478, 0.6522],\n",
      "        [0.3503, 0.6497],\n",
      "        [0.1714, 0.8286],\n",
      "        [0.9531, 0.0469],\n",
      "        [0.9214, 0.0786],\n",
      "        [0.1444, 0.8556],\n",
      "        [0.1883, 0.8117],\n",
      "        [0.1101, 0.8899],\n",
      "        [0.1724, 0.8276],\n",
      "        [0.1205, 0.8795],\n",
      "        [0.8245, 0.1755]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6622456312179565\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  31%|████████▊                   | 16/51 [00:37<01:22,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1655, 0.8345],\n",
      "        [0.1447, 0.8553],\n",
      "        [0.1832, 0.8168],\n",
      "        [0.9173, 0.0827],\n",
      "        [0.7784, 0.2216],\n",
      "        [0.1665, 0.8335],\n",
      "        [0.1613, 0.8387],\n",
      "        [0.4040, 0.5960],\n",
      "        [0.1441, 0.8559],\n",
      "        [0.3016, 0.6984],\n",
      "        [0.2184, 0.7816],\n",
      "        [0.8306, 0.1694],\n",
      "        [0.2034, 0.7966],\n",
      "        [0.1631, 0.8369],\n",
      "        [0.2621, 0.7379],\n",
      "        [0.1040, 0.8960]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7036443948745728\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  33%|█████████▎                  | 17/51 [00:39<01:20,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1717, 0.8283],\n",
      "        [0.1541, 0.8459],\n",
      "        [0.3176, 0.6824],\n",
      "        [0.3438, 0.6562],\n",
      "        [0.7118, 0.2882],\n",
      "        [0.3009, 0.6991],\n",
      "        [0.1604, 0.8396],\n",
      "        [0.0706, 0.9294],\n",
      "        [0.1393, 0.8607],\n",
      "        [0.1757, 0.8243],\n",
      "        [0.1477, 0.8523],\n",
      "        [0.1298, 0.8702],\n",
      "        [0.2472, 0.7528],\n",
      "        [0.1893, 0.8107],\n",
      "        [0.3216, 0.6784],\n",
      "        [0.0939, 0.9061]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4975723624229431\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  35%|█████████▉                  | 18/51 [00:42<01:17,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1747, 0.8253],\n",
      "        [0.0567, 0.9433],\n",
      "        [0.4723, 0.5277],\n",
      "        [0.1084, 0.8916],\n",
      "        [0.2303, 0.7697],\n",
      "        [0.5771, 0.4229],\n",
      "        [0.1825, 0.8175],\n",
      "        [0.2519, 0.7481],\n",
      "        [0.2923, 0.7077],\n",
      "        [0.1718, 0.8282],\n",
      "        [0.6388, 0.3612],\n",
      "        [0.0891, 0.9109],\n",
      "        [0.2984, 0.7016],\n",
      "        [0.2231, 0.7769],\n",
      "        [0.1941, 0.8059],\n",
      "        [0.1897, 0.8103]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5410406589508057\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  37%|██████████▍                 | 19/51 [00:44<01:15,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1519, 0.8481],\n",
      "        [0.1258, 0.8742],\n",
      "        [0.0699, 0.9301],\n",
      "        [0.1839, 0.8161],\n",
      "        [0.8821, 0.1179],\n",
      "        [0.2104, 0.7896],\n",
      "        [0.1449, 0.8551],\n",
      "        [0.3223, 0.6777],\n",
      "        [0.9236, 0.0764],\n",
      "        [0.1108, 0.8892],\n",
      "        [0.3322, 0.6678],\n",
      "        [0.1611, 0.8389],\n",
      "        [0.2022, 0.7978],\n",
      "        [0.3530, 0.6470],\n",
      "        [0.1532, 0.8468],\n",
      "        [0.3103, 0.6897]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5440646409988403\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  39%|██████████▉                 | 20/51 [00:46<01:12,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1871, 0.8129],\n",
      "        [0.1888, 0.8112],\n",
      "        [0.0878, 0.9122],\n",
      "        [0.0281, 0.9719],\n",
      "        [0.1865, 0.8135],\n",
      "        [0.1773, 0.8227],\n",
      "        [0.0282, 0.9718],\n",
      "        [0.1723, 0.8277],\n",
      "        [0.1898, 0.8102],\n",
      "        [0.1581, 0.8419],\n",
      "        [0.1703, 0.8297],\n",
      "        [0.2125, 0.7875],\n",
      "        [0.0165, 0.9835],\n",
      "        [0.1446, 0.8554],\n",
      "        [0.7773, 0.2227],\n",
      "        [0.2055, 0.7945]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5217021107673645\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  41%|███████████▌                | 21/51 [00:49<01:09,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0901, 0.9099],\n",
      "        [0.0038, 0.9962],\n",
      "        [0.1955, 0.8045],\n",
      "        [0.0238, 0.9762],\n",
      "        [0.1915, 0.8085],\n",
      "        [0.0543, 0.9457],\n",
      "        [0.1809, 0.8191],\n",
      "        [0.1460, 0.8540],\n",
      "        [0.1351, 0.8649],\n",
      "        [0.0951, 0.9049],\n",
      "        [0.1498, 0.8502],\n",
      "        [0.1749, 0.8251],\n",
      "        [0.6046, 0.3954],\n",
      "        [0.0508, 0.9492],\n",
      "        [0.1165, 0.8835],\n",
      "        [0.1150, 0.8850]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.6368972659111023\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  43%|████████████                | 22/51 [00:51<01:07,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1769, 0.8231],\n",
      "        [0.1750, 0.8250],\n",
      "        [0.1648, 0.8352],\n",
      "        [0.1705, 0.8295],\n",
      "        [0.1300, 0.8700],\n",
      "        [0.1250, 0.8750],\n",
      "        [0.1627, 0.8373],\n",
      "        [0.0020, 0.9980],\n",
      "        [0.0039, 0.9961],\n",
      "        [0.1462, 0.8538],\n",
      "        [0.1289, 0.8711],\n",
      "        [0.1588, 0.8412],\n",
      "        [0.0140, 0.9860],\n",
      "        [0.1779, 0.8221],\n",
      "        [0.0439, 0.9561],\n",
      "        [0.1243, 0.8757]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.24858257174491882\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  45%|████████████▋               | 23/51 [00:53<01:04,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.0173e-01, 7.9827e-01],\n",
      "        [1.3437e-01, 8.6563e-01],\n",
      "        [2.3776e-01, 7.6224e-01],\n",
      "        [2.0097e-01, 7.9903e-01],\n",
      "        [1.1092e-01, 8.8908e-01],\n",
      "        [7.2595e-02, 9.2740e-01],\n",
      "        [5.4764e-01, 4.5236e-01],\n",
      "        [2.0630e-01, 7.9370e-01],\n",
      "        [1.2078e-01, 8.7922e-01],\n",
      "        [2.0552e-01, 7.9448e-01],\n",
      "        [1.7857e-01, 8.2143e-01],\n",
      "        [6.2758e-03, 9.9372e-01],\n",
      "        [4.7208e-06, 1.0000e+00],\n",
      "        [1.6884e-01, 8.3116e-01],\n",
      "        [2.3213e-01, 7.6787e-01],\n",
      "        [4.0805e-02, 9.5920e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:1.2590575218200684\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  47%|█████████████▏              | 24/51 [00:56<01:02,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0012, 0.9988],\n",
      "        [0.1105, 0.8895],\n",
      "        [0.1758, 0.8242],\n",
      "        [0.0078, 0.9922],\n",
      "        [0.1056, 0.8944],\n",
      "        [0.0933, 0.9067],\n",
      "        [0.1019, 0.8981],\n",
      "        [0.0877, 0.9123],\n",
      "        [0.2239, 0.7761],\n",
      "        [0.1729, 0.8271],\n",
      "        [0.1284, 0.8716],\n",
      "        [0.2584, 0.7416],\n",
      "        [0.0494, 0.9506],\n",
      "        [0.1246, 0.8754],\n",
      "        [0.1046, 0.8954],\n",
      "        [0.0595, 0.9405]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.440784752368927\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  49%|█████████████▋              | 25/51 [00:58<01:00,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.2792e-01, 7.7208e-01],\n",
      "        [5.5870e-02, 9.4413e-01],\n",
      "        [1.7861e-01, 8.2139e-01],\n",
      "        [1.6940e-01, 8.3060e-01],\n",
      "        [1.0237e-01, 8.9763e-01],\n",
      "        [6.6567e-02, 9.3343e-01],\n",
      "        [6.4288e-02, 9.3571e-01],\n",
      "        [1.9245e-01, 8.0755e-01],\n",
      "        [1.5988e-01, 8.4012e-01],\n",
      "        [1.2249e-01, 8.7751e-01],\n",
      "        [2.3281e-01, 7.6719e-01],\n",
      "        [6.0374e-04, 9.9940e-01],\n",
      "        [1.8245e-01, 8.1755e-01],\n",
      "        [1.7126e-01, 8.2874e-01],\n",
      "        [1.6264e-01, 8.3736e-01],\n",
      "        [1.7771e-01, 8.2229e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.2317885309457779\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  51%|██████████████▎             | 26/51 [01:00<00:57,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.2493e-01, 7.7507e-01],\n",
      "        [1.5928e-01, 8.4072e-01],\n",
      "        [7.7728e-02, 9.2227e-01],\n",
      "        [1.9295e-01, 8.0705e-01],\n",
      "        [2.2072e-01, 7.7928e-01],\n",
      "        [1.1325e-01, 8.8675e-01],\n",
      "        [5.2388e-02, 9.4761e-01],\n",
      "        [7.2810e-02, 9.2719e-01],\n",
      "        [2.3720e-01, 7.6280e-01],\n",
      "        [1.3825e-01, 8.6175e-01],\n",
      "        [2.0067e-01, 7.9933e-01],\n",
      "        [3.3598e-03, 9.9664e-01],\n",
      "        [2.8721e-02, 9.7128e-01],\n",
      "        [9.2573e-02, 9.0743e-01],\n",
      "        [2.1452e-01, 7.8548e-01],\n",
      "        [2.9276e-04, 9.9971e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.396875262260437\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  53%|██████████████▊             | 27/51 [01:02<00:55,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0967, 0.9033],\n",
      "        [0.0827, 0.9173],\n",
      "        [0.2202, 0.7798],\n",
      "        [0.0926, 0.9074],\n",
      "        [0.1228, 0.8772],\n",
      "        [0.0894, 0.9106],\n",
      "        [0.1873, 0.8127],\n",
      "        [0.2018, 0.7982],\n",
      "        [0.0826, 0.9174],\n",
      "        [0.1131, 0.8869],\n",
      "        [0.1963, 0.8037],\n",
      "        [0.0039, 0.9961],\n",
      "        [0.0762, 0.9238],\n",
      "        [0.1845, 0.8155],\n",
      "        [0.1202, 0.8798],\n",
      "        [0.0173, 0.9827]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7028249502182007\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  55%|███████████████▎            | 28/51 [01:05<00:53,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1601, 0.8399],\n",
      "        [0.8088, 0.1912],\n",
      "        [0.1691, 0.8309],\n",
      "        [0.1772, 0.8228],\n",
      "        [0.1759, 0.8241],\n",
      "        [0.1186, 0.8814],\n",
      "        [0.0272, 0.9728],\n",
      "        [0.0245, 0.9755],\n",
      "        [0.0208, 0.9792],\n",
      "        [0.0612, 0.9388],\n",
      "        [0.0850, 0.9150],\n",
      "        [0.1118, 0.8882],\n",
      "        [0.1202, 0.8798],\n",
      "        [0.1573, 0.8427],\n",
      "        [0.0058, 0.9942],\n",
      "        [0.1930, 0.8070]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:1.6710611581802368\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  57%|███████████████▉            | 29/51 [01:07<00:50,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1663, 0.8337],\n",
      "        [0.1220, 0.8780],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.0557, 0.9443],\n",
      "        [0.1967, 0.8033],\n",
      "        [0.1640, 0.8360],\n",
      "        [0.1665, 0.8335],\n",
      "        [0.6627, 0.3373],\n",
      "        [0.1926, 0.8074],\n",
      "        [0.0036, 0.9964],\n",
      "        [0.0050, 0.9950],\n",
      "        [0.0438, 0.9562],\n",
      "        [0.2067, 0.7933],\n",
      "        [0.2323, 0.7677],\n",
      "        [0.3359, 0.6641],\n",
      "        [0.0885, 0.9115]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4142952561378479\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  59%|████████████████▍           | 30/51 [01:09<00:48,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1899, 0.8101],\n",
      "        [0.1900, 0.8100],\n",
      "        [0.2033, 0.7967],\n",
      "        [0.0499, 0.9501],\n",
      "        [0.1526, 0.8474],\n",
      "        [0.2268, 0.7732],\n",
      "        [0.1091, 0.8909],\n",
      "        [0.0155, 0.9845],\n",
      "        [0.1694, 0.8306],\n",
      "        [0.2163, 0.7837],\n",
      "        [0.1983, 0.8017],\n",
      "        [0.1999, 0.8001],\n",
      "        [0.0066, 0.9934],\n",
      "        [0.1559, 0.8441],\n",
      "        [0.1587, 0.8413],\n",
      "        [0.1444, 0.8556]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.3561345338821411\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  61%|█████████████████           | 31/51 [01:12<00:45,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[1.3158e-01, 8.6842e-01],\n",
      "        [1.6746e-01, 8.3254e-01],\n",
      "        [1.4223e-01, 8.5777e-01],\n",
      "        [1.6311e-01, 8.3689e-01],\n",
      "        [2.4792e-01, 7.5208e-01],\n",
      "        [6.2517e-01, 3.7483e-01],\n",
      "        [1.1257e-01, 8.8743e-01],\n",
      "        [2.1227e-01, 7.8773e-01],\n",
      "        [1.6474e-01, 8.3526e-01],\n",
      "        [1.9420e-01, 8.0580e-01],\n",
      "        [1.7812e-01, 8.2188e-01],\n",
      "        [2.0017e-01, 7.9983e-01],\n",
      "        [2.3281e-04, 9.9977e-01],\n",
      "        [1.0103e-01, 8.9897e-01],\n",
      "        [1.9563e-01, 8.0437e-01],\n",
      "        [1.4911e-01, 8.5089e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.2931978404521942\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  63%|█████████████████▌          | 32/51 [01:14<00:43,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.4040, 0.5960],\n",
      "        [0.1955, 0.8045],\n",
      "        [0.2458, 0.7542],\n",
      "        [0.1272, 0.8728],\n",
      "        [0.1105, 0.8895],\n",
      "        [0.1866, 0.8134],\n",
      "        [0.1752, 0.8248],\n",
      "        [0.1840, 0.8160],\n",
      "        [0.1618, 0.8382],\n",
      "        [0.9958, 0.0042],\n",
      "        [0.1165, 0.8835],\n",
      "        [0.1899, 0.8101],\n",
      "        [0.0795, 0.9205],\n",
      "        [0.1896, 0.8104],\n",
      "        [0.1862, 0.8138],\n",
      "        [0.1786, 0.8214]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3596520721912384\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  65%|██████████████████          | 33/51 [01:16<00:41,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1968, 0.8032],\n",
      "        [0.2012, 0.7988],\n",
      "        [0.0263, 0.9737],\n",
      "        [0.1980, 0.8020],\n",
      "        [0.0806, 0.9194],\n",
      "        [0.2306, 0.7694],\n",
      "        [0.2028, 0.7972],\n",
      "        [0.1253, 0.8747],\n",
      "        [0.0099, 0.9901],\n",
      "        [0.2071, 0.7929],\n",
      "        [0.1549, 0.8451],\n",
      "        [0.1959, 0.8041],\n",
      "        [0.1944, 0.8056],\n",
      "        [0.1740, 0.8260],\n",
      "        [0.1766, 0.8234],\n",
      "        [0.2082, 0.7918]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6781662702560425\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  67%|██████████████████▋         | 34/51 [01:18<00:38,  2.27s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0281, 0.9719],\n",
      "        [0.1908, 0.8092],\n",
      "        [0.1753, 0.8247],\n",
      "        [0.3622, 0.6378],\n",
      "        [0.2074, 0.7926],\n",
      "        [0.3822, 0.6178],\n",
      "        [0.2287, 0.7713],\n",
      "        [0.9953, 0.0047],\n",
      "        [0.1906, 0.8094],\n",
      "        [0.1149, 0.8851],\n",
      "        [0.1507, 0.8493],\n",
      "        [0.3841, 0.6159],\n",
      "        [0.1998, 0.8002],\n",
      "        [0.0012, 0.9988],\n",
      "        [0.1763, 0.8237],\n",
      "        [0.2074, 0.7926]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.9409842491149902\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  69%|███████████████████▏        | 35/51 [01:21<00:36,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0677, 0.9323],\n",
      "        [0.2615, 0.7385],\n",
      "        [0.2476, 0.7524],\n",
      "        [0.2100, 0.7900],\n",
      "        [0.1969, 0.8031],\n",
      "        [0.1006, 0.8994],\n",
      "        [0.2115, 0.7885],\n",
      "        [0.2447, 0.7553],\n",
      "        [0.1732, 0.8268],\n",
      "        [0.5280, 0.4720],\n",
      "        [0.1918, 0.8082],\n",
      "        [0.2298, 0.7702],\n",
      "        [0.2079, 0.7921],\n",
      "        [0.2024, 0.7976],\n",
      "        [0.2083, 0.7917],\n",
      "        [0.4723, 0.5277]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.7775837182998657\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  71%|███████████████████▊        | 36/51 [01:23<00:34,  2.27s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1812, 0.8188],\n",
      "        [0.2344, 0.7656],\n",
      "        [0.2351, 0.7649],\n",
      "        [0.1906, 0.8094],\n",
      "        [0.0530, 0.9470],\n",
      "        [0.0186, 0.9814],\n",
      "        [0.1912, 0.8088],\n",
      "        [0.3234, 0.6766],\n",
      "        [0.6122, 0.3878],\n",
      "        [0.2521, 0.7479],\n",
      "        [0.1895, 0.8105],\n",
      "        [0.1902, 0.8098],\n",
      "        [0.6373, 0.3627],\n",
      "        [0.1198, 0.8802],\n",
      "        [0.1907, 0.8093],\n",
      "        [0.5029, 0.4971]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.8811439275741577\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  73%|████████████████████▎       | 37/51 [01:25<00:31,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2474, 0.7526],\n",
      "        [0.1192, 0.8808],\n",
      "        [0.6672, 0.3328],\n",
      "        [0.3651, 0.6349],\n",
      "        [0.2606, 0.7394],\n",
      "        [0.1430, 0.8570],\n",
      "        [0.0752, 0.9248],\n",
      "        [0.3435, 0.6565],\n",
      "        [0.1543, 0.8457],\n",
      "        [0.2649, 0.7351],\n",
      "        [0.0793, 0.9207],\n",
      "        [0.1824, 0.8176],\n",
      "        [0.2699, 0.7301],\n",
      "        [0.3398, 0.6602],\n",
      "        [0.1758, 0.8242],\n",
      "        [0.9441, 0.0559]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.8464042544364929\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  75%|████████████████████▊       | 38/51 [01:28<00:29,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.6364, 0.3636],\n",
      "        [0.3131, 0.6869],\n",
      "        [0.1897, 0.8103],\n",
      "        [0.1918, 0.8082],\n",
      "        [0.3267, 0.6733],\n",
      "        [0.2311, 0.7689],\n",
      "        [0.1875, 0.8125],\n",
      "        [0.2842, 0.7158],\n",
      "        [0.1757, 0.8243],\n",
      "        [0.1808, 0.8192],\n",
      "        [0.2057, 0.7943],\n",
      "        [0.2765, 0.7235],\n",
      "        [0.3056, 0.6944],\n",
      "        [0.2012, 0.7988],\n",
      "        [0.2010, 0.7990],\n",
      "        [0.2088, 0.7912]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.39911043643951416\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  76%|█████████████████████▍      | 39/51 [01:30<00:27,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2406, 0.7594],\n",
      "        [0.3250, 0.6750],\n",
      "        [0.0162, 0.9838],\n",
      "        [0.2924, 0.7076],\n",
      "        [0.5817, 0.4183],\n",
      "        [0.4181, 0.5819],\n",
      "        [0.2656, 0.7344],\n",
      "        [0.0164, 0.9836],\n",
      "        [0.1989, 0.8011],\n",
      "        [0.3332, 0.6668],\n",
      "        [0.2019, 0.7981],\n",
      "        [0.2727, 0.7273],\n",
      "        [0.1833, 0.8167],\n",
      "        [0.2024, 0.7976],\n",
      "        [0.1620, 0.8380],\n",
      "        [0.2007, 0.7993]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5133362412452698\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  78%|█████████████████████▉      | 40/51 [01:32<00:25,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2173, 0.7827],\n",
      "        [0.2293, 0.7707],\n",
      "        [0.2050, 0.7950],\n",
      "        [0.2327, 0.7673],\n",
      "        [0.2433, 0.7567],\n",
      "        [0.2092, 0.7908],\n",
      "        [0.2139, 0.7861],\n",
      "        [0.2838, 0.7162],\n",
      "        [0.2218, 0.7782],\n",
      "        [0.5447, 0.4553],\n",
      "        [0.2342, 0.7658],\n",
      "        [0.2321, 0.7679],\n",
      "        [0.2145, 0.7855],\n",
      "        [0.2123, 0.7877],\n",
      "        [0.2296, 0.7704],\n",
      "        [0.2280, 0.7720]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4291299283504486\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  80%|██████████████████████▌     | 41/51 [01:34<00:22,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1977, 0.8023],\n",
      "        [0.1043, 0.8957],\n",
      "        [0.2325, 0.7675],\n",
      "        [0.2053, 0.7947],\n",
      "        [0.2148, 0.7852],\n",
      "        [0.2985, 0.7015],\n",
      "        [0.2151, 0.7849],\n",
      "        [0.2515, 0.7485],\n",
      "        [0.1852, 0.8148],\n",
      "        [0.0705, 0.9295],\n",
      "        [0.2140, 0.7860],\n",
      "        [0.2278, 0.7722],\n",
      "        [0.1980, 0.8020],\n",
      "        [0.2377, 0.7623],\n",
      "        [0.1264, 0.8736],\n",
      "        [0.2609, 0.7391]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.38427111506462097\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  82%|███████████████████████     | 42/51 [01:37<00:20,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2750, 0.7250],\n",
      "        [0.2323, 0.7677],\n",
      "        [0.2121, 0.7879],\n",
      "        [0.0219, 0.9781],\n",
      "        [0.2160, 0.7840],\n",
      "        [0.2024, 0.7976],\n",
      "        [0.0736, 0.9264],\n",
      "        [0.2309, 0.7691],\n",
      "        [0.2408, 0.7592],\n",
      "        [0.2125, 0.7875],\n",
      "        [0.2085, 0.7915],\n",
      "        [0.3763, 0.6237],\n",
      "        [0.4389, 0.5611],\n",
      "        [0.2320, 0.7680],\n",
      "        [0.2245, 0.7755],\n",
      "        [0.3093, 0.6907]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.44950079917907715\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  84%|███████████████████████▌    | 43/51 [01:39<00:18,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0081, 0.9919],\n",
      "        [0.9270, 0.0730],\n",
      "        [0.2107, 0.7893],\n",
      "        [0.8589, 0.1411],\n",
      "        [0.1988, 0.8012],\n",
      "        [0.0331, 0.9669],\n",
      "        [0.6699, 0.3301],\n",
      "        [0.2055, 0.7945],\n",
      "        [0.0022, 0.9978],\n",
      "        [0.2325, 0.7675],\n",
      "        [0.2027, 0.7973],\n",
      "        [0.2229, 0.7771],\n",
      "        [0.2096, 0.7904],\n",
      "        [0.2108, 0.7892],\n",
      "        [0.1458, 0.8542],\n",
      "        [0.2200, 0.7800]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.8781086206436157\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  86%|████████████████████████▏   | 44/51 [01:41<00:16,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.7899, 0.2101],\n",
      "        [0.2128, 0.7872],\n",
      "        [0.2318, 0.7682],\n",
      "        [0.1045, 0.8955],\n",
      "        [0.2191, 0.7809],\n",
      "        [0.2406, 0.7594],\n",
      "        [0.1346, 0.8654],\n",
      "        [0.1502, 0.8498],\n",
      "        [0.2077, 0.7923],\n",
      "        [0.1244, 0.8756],\n",
      "        [0.2123, 0.7877],\n",
      "        [0.1115, 0.8885],\n",
      "        [0.2090, 0.7910],\n",
      "        [0.2058, 0.7942],\n",
      "        [0.1263, 0.8737],\n",
      "        [0.1978, 0.8022]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.65519118309021\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  88%|████████████████████████▋   | 45/51 [01:44<00:13,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2221, 0.7779],\n",
      "        [0.2112, 0.7888],\n",
      "        [0.2943, 0.7057],\n",
      "        [0.1478, 0.8522],\n",
      "        [0.1748, 0.8252],\n",
      "        [0.2308, 0.7692],\n",
      "        [0.2165, 0.7835],\n",
      "        [0.1871, 0.8129],\n",
      "        [0.2162, 0.7838],\n",
      "        [0.2154, 0.7846],\n",
      "        [0.2221, 0.7779],\n",
      "        [0.2138, 0.7862],\n",
      "        [0.2159, 0.7841],\n",
      "        [0.0215, 0.9785],\n",
      "        [0.2451, 0.7549],\n",
      "        [0.1822, 0.8178]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5568884611129761\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  90%|█████████████████████████▎  | 46/51 [01:46<00:11,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2240, 0.7760],\n",
      "        [0.0300, 0.9700],\n",
      "        [0.1423, 0.8577],\n",
      "        [0.0698, 0.9302],\n",
      "        [0.2223, 0.7777],\n",
      "        [0.2153, 0.7847],\n",
      "        [0.1757, 0.8243],\n",
      "        [0.2500, 0.7500],\n",
      "        [0.2406, 0.7594],\n",
      "        [0.2100, 0.7900],\n",
      "        [0.2091, 0.7909],\n",
      "        [0.2313, 0.7687],\n",
      "        [0.0677, 0.9323],\n",
      "        [0.1836, 0.8164],\n",
      "        [0.1545, 0.8455],\n",
      "        [0.1482, 0.8518]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6116055250167847\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  92%|█████████████████████████▊  | 47/51 [01:48<00:09,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0899, 0.9101],\n",
      "        [0.0843, 0.9157],\n",
      "        [0.0011, 0.9989],\n",
      "        [0.6725, 0.3275],\n",
      "        [0.2306, 0.7694],\n",
      "        [0.1281, 0.8719],\n",
      "        [0.1748, 0.8252],\n",
      "        [0.1734, 0.8266],\n",
      "        [0.3416, 0.6584],\n",
      "        [0.2168, 0.7832],\n",
      "        [0.6625, 0.3375],\n",
      "        [0.1895, 0.8105],\n",
      "        [0.0223, 0.9777],\n",
      "        [0.2357, 0.7643],\n",
      "        [0.1532, 0.8468],\n",
      "        [0.1896, 0.8104]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.8816317319869995\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  94%|██████████████████████████▎ | 48/51 [01:51<00:06,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1004, 0.8996],\n",
      "        [0.2226, 0.7774],\n",
      "        [0.2283, 0.7717],\n",
      "        [0.1032, 0.8968],\n",
      "        [0.0578, 0.9422],\n",
      "        [0.2422, 0.7578],\n",
      "        [0.1770, 0.8230],\n",
      "        [0.2070, 0.7930],\n",
      "        [0.2250, 0.7750],\n",
      "        [0.2215, 0.7785],\n",
      "        [0.2108, 0.7892],\n",
      "        [0.1644, 0.8356],\n",
      "        [0.2332, 0.7668],\n",
      "        [0.1668, 0.8332],\n",
      "        [0.8290, 0.1710],\n",
      "        [0.1365, 0.8635]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5909456610679626\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  96%|██████████████████████████▉ | 49/51 [01:53<00:04,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2175, 0.7825],\n",
      "        [0.0441, 0.9559],\n",
      "        [0.1862, 0.8138],\n",
      "        [0.1515, 0.8485],\n",
      "        [0.0095, 0.9905],\n",
      "        [0.1530, 0.8470],\n",
      "        [0.1023, 0.8977],\n",
      "        [0.5891, 0.4109],\n",
      "        [0.2035, 0.7965],\n",
      "        [0.2393, 0.7607],\n",
      "        [0.2302, 0.7698],\n",
      "        [0.2977, 0.7023],\n",
      "        [0.2233, 0.7767],\n",
      "        [0.2188, 0.7812],\n",
      "        [0.0937, 0.9063],\n",
      "        [0.1667, 0.8333]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.48237067461013794\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10:  98%|███████████████████████████▍| 50/51 [01:55<00:02,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.2859e-01, 7.7141e-01],\n",
      "        [1.0000e+00, 3.3774e-06]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:6.431920051574707\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/10: 100%|████████████████████████████| 51/51 [01:56<00:00,  2.28s/batch]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "total_loss:40.15325386822224\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:   0%|                                     | 0/51 [00:00<?, ?batch/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2218, 0.7782],\n",
      "        [0.1780, 0.8220],\n",
      "        [0.2014, 0.7986],\n",
      "        [0.2121, 0.7879],\n",
      "        [0.1544, 0.8456],\n",
      "        [0.1021, 0.8979],\n",
      "        [0.1975, 0.8025],\n",
      "        [0.2241, 0.7759],\n",
      "        [0.1828, 0.8172],\n",
      "        [0.4303, 0.5697],\n",
      "        [0.8434, 0.1566],\n",
      "        [0.7701, 0.2299],\n",
      "        [0.2016, 0.7984],\n",
      "        [0.4758, 0.5242],\n",
      "        [0.1327, 0.8673],\n",
      "        [0.1984, 0.8016]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5263738632202148\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:   2%|▌                            | 1/51 [00:02<01:55,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2351, 0.7649],\n",
      "        [0.2778, 0.7222],\n",
      "        [0.2295, 0.7705],\n",
      "        [0.1700, 0.8300],\n",
      "        [0.1754, 0.8246],\n",
      "        [0.0356, 0.9644],\n",
      "        [0.0021, 0.9979],\n",
      "        [0.1888, 0.8112],\n",
      "        [0.1582, 0.8418],\n",
      "        [0.2353, 0.7647],\n",
      "        [0.2348, 0.7652],\n",
      "        [0.0928, 0.9072],\n",
      "        [0.2135, 0.7865],\n",
      "        [0.1782, 0.8218],\n",
      "        [0.2111, 0.7889],\n",
      "        [0.2193, 0.7807]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.40171441435813904\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:   4%|█▏                           | 2/51 [00:04<01:51,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2279, 0.7721],\n",
      "        [0.1753, 0.8247],\n",
      "        [0.1733, 0.8267],\n",
      "        [0.2289, 0.7711],\n",
      "        [0.1918, 0.8082],\n",
      "        [0.0326, 0.9674],\n",
      "        [0.1095, 0.8905],\n",
      "        [0.2350, 0.7650],\n",
      "        [0.2165, 0.7835],\n",
      "        [0.2305, 0.7695],\n",
      "        [0.2179, 0.7821],\n",
      "        [0.0014, 0.9986],\n",
      "        [0.0143, 0.9857],\n",
      "        [0.2359, 0.7641],\n",
      "        [0.1496, 0.8504],\n",
      "        [0.1767, 0.8233]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5091018676757812\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:   6%|█▋                           | 3/51 [00:06<01:49,  2.27s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2579, 0.7421],\n",
      "        [0.2108, 0.7892],\n",
      "        [0.1428, 0.8572],\n",
      "        [0.2303, 0.7697],\n",
      "        [0.2432, 0.7568],\n",
      "        [0.1451, 0.8549],\n",
      "        [0.0522, 0.9478],\n",
      "        [0.2234, 0.7766],\n",
      "        [0.3530, 0.6470],\n",
      "        [0.2137, 0.7863],\n",
      "        [0.1264, 0.8736],\n",
      "        [0.0409, 0.9591],\n",
      "        [0.1521, 0.8479],\n",
      "        [0.0610, 0.9390],\n",
      "        [0.0269, 0.9731],\n",
      "        [0.2836, 0.7164]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.42866846919059753\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:   8%|██▎                          | 4/51 [00:09<01:46,  2.27s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1650, 0.8350],\n",
      "        [0.2247, 0.7753],\n",
      "        [0.0763, 0.9237],\n",
      "        [0.9980, 0.0020],\n",
      "        [0.1976, 0.8024],\n",
      "        [0.1647, 0.8353],\n",
      "        [0.2220, 0.7780],\n",
      "        [0.2032, 0.7968],\n",
      "        [0.0026, 0.9974],\n",
      "        [0.0084, 0.9916],\n",
      "        [0.2416, 0.7584],\n",
      "        [0.1944, 0.8056],\n",
      "        [0.2648, 0.7352],\n",
      "        [0.3944, 0.6056],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.1704, 0.8296]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5943387746810913\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  10%|██▊                          | 5/51 [00:11<01:44,  2.27s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2395, 0.7605],\n",
      "        [0.3167, 0.6833],\n",
      "        [0.1544, 0.8456],\n",
      "        [0.1545, 0.8455],\n",
      "        [0.1413, 0.8587],\n",
      "        [0.1105, 0.8895],\n",
      "        [0.2323, 0.7677],\n",
      "        [0.1675, 0.8325],\n",
      "        [0.2275, 0.7725],\n",
      "        [0.2027, 0.7973],\n",
      "        [0.1784, 0.8216],\n",
      "        [0.1337, 0.8663],\n",
      "        [0.3124, 0.6876],\n",
      "        [0.3243, 0.6757],\n",
      "        [0.2031, 0.7969],\n",
      "        [0.2356, 0.7644]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5107413530349731\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  12%|███▍                         | 6/51 [00:13<01:42,  2.27s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[1.7380e-01, 8.2620e-01],\n",
      "        [4.9971e-02, 9.5003e-01],\n",
      "        [1.6373e-01, 8.3627e-01],\n",
      "        [1.5874e-01, 8.4126e-01],\n",
      "        [3.0132e-02, 9.6987e-01],\n",
      "        [7.4199e-02, 9.2580e-01],\n",
      "        [2.2131e-01, 7.7869e-01],\n",
      "        [9.9993e-01, 6.8610e-05],\n",
      "        [1.5520e-01, 8.4480e-01],\n",
      "        [2.2707e-01, 7.7293e-01],\n",
      "        [2.3021e-01, 7.6979e-01],\n",
      "        [2.2132e-01, 7.7868e-01],\n",
      "        [6.6902e-02, 9.3310e-01],\n",
      "        [2.3670e-01, 7.6330e-01],\n",
      "        [2.3186e-01, 7.6814e-01],\n",
      "        [4.1345e-02, 9.5865e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.7989510297775269\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  14%|███▉                         | 7/51 [00:16<01:41,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2171, 0.7829],\n",
      "        [0.0388, 0.9612],\n",
      "        [0.2128, 0.7872],\n",
      "        [0.2771, 0.7229],\n",
      "        [0.7679, 0.2321],\n",
      "        [0.1779, 0.8221],\n",
      "        [0.2327, 0.7673],\n",
      "        [0.1084, 0.8916],\n",
      "        [0.2194, 0.7806],\n",
      "        [0.1204, 0.8796],\n",
      "        [0.2327, 0.7673],\n",
      "        [0.1098, 0.8902],\n",
      "        [0.0043, 0.9957],\n",
      "        [0.2186, 0.7814],\n",
      "        [0.2382, 0.7618],\n",
      "        [0.1310, 0.8690]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5727290511131287\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  16%|████▌                        | 8/51 [00:18<01:39,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2275, 0.7725],\n",
      "        [0.2365, 0.7635],\n",
      "        [0.2218, 0.7782],\n",
      "        [0.1604, 0.8396],\n",
      "        [0.2240, 0.7760],\n",
      "        [0.2079, 0.7921],\n",
      "        [0.2292, 0.7708],\n",
      "        [0.1633, 0.8367],\n",
      "        [0.2066, 0.7934],\n",
      "        [0.2720, 0.7280],\n",
      "        [0.1782, 0.8218],\n",
      "        [0.1776, 0.8224],\n",
      "        [0.1987, 0.8013],\n",
      "        [0.2161, 0.7839],\n",
      "        [0.2444, 0.7556],\n",
      "        [0.9825, 0.0175]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.878905177116394\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  18%|█████                        | 9/51 [00:20<01:38,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2186, 0.7814],\n",
      "        [0.3655, 0.6345],\n",
      "        [0.4024, 0.5976],\n",
      "        [0.2350, 0.7650],\n",
      "        [0.1311, 0.8689],\n",
      "        [0.2367, 0.7633],\n",
      "        [0.2542, 0.7458],\n",
      "        [0.2505, 0.7495],\n",
      "        [0.2425, 0.7575],\n",
      "        [0.2282, 0.7718],\n",
      "        [0.2348, 0.7652],\n",
      "        [0.2609, 0.7391],\n",
      "        [0.2308, 0.7692],\n",
      "        [0.5628, 0.4372],\n",
      "        [0.2341, 0.7659],\n",
      "        [0.2605, 0.7395]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5250675082206726\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  20%|█████▍                      | 10/51 [00:23<01:36,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2882, 0.7118],\n",
      "        [0.2565, 0.7435],\n",
      "        [0.1162, 0.8838],\n",
      "        [0.1410, 0.8590],\n",
      "        [0.2626, 0.7374],\n",
      "        [0.9480, 0.0520],\n",
      "        [0.2395, 0.7605],\n",
      "        [0.2157, 0.7843],\n",
      "        [0.2390, 0.7610],\n",
      "        [0.9490, 0.0510],\n",
      "        [0.0550, 0.9450],\n",
      "        [0.0969, 0.9031],\n",
      "        [0.2682, 0.7318],\n",
      "        [0.2468, 0.7532],\n",
      "        [0.2670, 0.7330],\n",
      "        [0.2637, 0.7363]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.47975820302963257\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  22%|██████                      | 11/51 [00:25<01:34,  2.37s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1905, 0.8095],\n",
      "        [0.2974, 0.7026],\n",
      "        [0.2280, 0.7720],\n",
      "        [0.2252, 0.7748],\n",
      "        [0.2344, 0.7656],\n",
      "        [0.1602, 0.8398],\n",
      "        [0.2534, 0.7466],\n",
      "        [0.2338, 0.7662],\n",
      "        [0.2706, 0.7294],\n",
      "        [0.2088, 0.7912],\n",
      "        [0.2513, 0.7487],\n",
      "        [0.1468, 0.8532],\n",
      "        [0.2949, 0.7051],\n",
      "        [0.1085, 0.8915],\n",
      "        [0.3471, 0.6529],\n",
      "        [0.2019, 0.7981]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.6547830104827881\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  24%|██████▌                     | 12/51 [00:27<01:32,  2.37s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2589, 0.7411],\n",
      "        [0.2586, 0.7414],\n",
      "        [0.3937, 0.6063],\n",
      "        [0.2131, 0.7869],\n",
      "        [0.2142, 0.7858],\n",
      "        [0.2392, 0.7608],\n",
      "        [0.2421, 0.7579],\n",
      "        [0.2877, 0.7123],\n",
      "        [0.2554, 0.7446],\n",
      "        [0.2376, 0.7624],\n",
      "        [0.2040, 0.7960],\n",
      "        [0.4011, 0.5989],\n",
      "        [0.2500, 0.7500],\n",
      "        [0.2608, 0.7392],\n",
      "        [0.3212, 0.6788],\n",
      "        [0.2086, 0.7914]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5586678385734558\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  25%|███████▏                    | 13/51 [00:30<01:29,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2993, 0.7007],\n",
      "        [0.3317, 0.6683],\n",
      "        [0.0926, 0.9074],\n",
      "        [0.2127, 0.7873],\n",
      "        [0.2940, 0.7060],\n",
      "        [0.2535, 0.7465],\n",
      "        [0.2243, 0.7757],\n",
      "        [0.2635, 0.7365],\n",
      "        [0.7865, 0.2135],\n",
      "        [0.0588, 0.9412],\n",
      "        [0.2525, 0.7475],\n",
      "        [0.1153, 0.8847],\n",
      "        [0.2563, 0.7437],\n",
      "        [0.2675, 0.7325],\n",
      "        [0.2394, 0.7606],\n",
      "        [0.2397, 0.7603]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3418000340461731\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  27%|███████▋                    | 14/51 [00:32<01:26,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.3996e-01, 7.6004e-01],\n",
      "        [2.0282e-01, 7.9718e-01],\n",
      "        [2.7153e-01, 7.2847e-01],\n",
      "        [1.8578e-01, 8.1422e-01],\n",
      "        [2.4880e-01, 7.5120e-01],\n",
      "        [9.9935e-01, 6.4914e-04],\n",
      "        [2.2194e-01, 7.7806e-01],\n",
      "        [3.2154e-01, 6.7846e-01],\n",
      "        [1.6351e-01, 8.3649e-01],\n",
      "        [2.4294e-01, 7.5706e-01],\n",
      "        [2.7877e-01, 7.2123e-01],\n",
      "        [6.5415e-02, 9.3458e-01],\n",
      "        [2.3878e-01, 7.6122e-01],\n",
      "        [2.3130e-01, 7.6870e-01],\n",
      "        [2.5974e-01, 7.4026e-01],\n",
      "        [2.6365e-01, 7.3635e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6493655443191528\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  29%|████████▏                   | 15/51 [00:34<01:24,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1328, 0.8672],\n",
      "        [0.2106, 0.7894],\n",
      "        [0.2388, 0.7612],\n",
      "        [0.9183, 0.0817],\n",
      "        [0.2099, 0.7901],\n",
      "        [0.3264, 0.6736],\n",
      "        [0.2791, 0.7209],\n",
      "        [0.2655, 0.7345],\n",
      "        [0.3544, 0.6456],\n",
      "        [0.1315, 0.8685],\n",
      "        [0.2072, 0.7928],\n",
      "        [0.2528, 0.7472],\n",
      "        [0.0717, 0.9283],\n",
      "        [0.2463, 0.7537],\n",
      "        [0.4124, 0.5876],\n",
      "        [0.1155, 0.8845]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.462660551071167\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  31%|████████▊                   | 16/51 [00:37<01:21,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2476, 0.7524],\n",
      "        [0.2506, 0.7494],\n",
      "        [0.2761, 0.7239],\n",
      "        [0.3630, 0.6370],\n",
      "        [0.3027, 0.6973],\n",
      "        [0.7600, 0.2400],\n",
      "        [0.2696, 0.7304],\n",
      "        [0.2886, 0.7114],\n",
      "        [0.1882, 0.8118],\n",
      "        [0.3172, 0.6828],\n",
      "        [0.2457, 0.7543],\n",
      "        [0.3093, 0.6907],\n",
      "        [0.3353, 0.6647],\n",
      "        [0.2734, 0.7266],\n",
      "        [0.1186, 0.8814],\n",
      "        [0.2505, 0.7495]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3749210834503174\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  33%|█████████▎                  | 17/51 [00:39<01:20,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.9923, 0.0077],\n",
      "        [0.2198, 0.7802],\n",
      "        [0.2502, 0.7498],\n",
      "        [0.2473, 0.7527],\n",
      "        [0.2450, 0.7550],\n",
      "        [0.3198, 0.6802],\n",
      "        [0.2365, 0.7635],\n",
      "        [0.2447, 0.7553],\n",
      "        [0.6938, 0.3062],\n",
      "        [0.2391, 0.7609],\n",
      "        [0.2405, 0.7595],\n",
      "        [0.0939, 0.9061],\n",
      "        [0.2566, 0.7434],\n",
      "        [0.2604, 0.7396],\n",
      "        [0.3195, 0.6805],\n",
      "        [0.3214, 0.6786]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.34909892082214355\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  35%|█████████▉                  | 18/51 [00:42<01:18,  2.37s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2796, 0.7204],\n",
      "        [0.9398, 0.0602],\n",
      "        [0.2312, 0.7688],\n",
      "        [0.2697, 0.7303],\n",
      "        [0.1831, 0.8169],\n",
      "        [0.2349, 0.7651],\n",
      "        [0.2285, 0.7715],\n",
      "        [0.2419, 0.7581],\n",
      "        [0.2428, 0.7572],\n",
      "        [0.2646, 0.7354],\n",
      "        [0.3261, 0.6739],\n",
      "        [0.2508, 0.7492],\n",
      "        [0.2411, 0.7589],\n",
      "        [0.9730, 0.0270],\n",
      "        [0.3530, 0.6470],\n",
      "        [0.2930, 0.7070]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.35371628403663635\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  37%|██████████▍                 | 19/51 [00:44<01:15,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.9766, 0.0234],\n",
      "        [0.2381, 0.7619],\n",
      "        [0.2319, 0.7681],\n",
      "        [0.3145, 0.6855],\n",
      "        [0.0062, 0.9938],\n",
      "        [0.3012, 0.6988],\n",
      "        [0.2515, 0.7485],\n",
      "        [0.3150, 0.6850],\n",
      "        [0.2559, 0.7441],\n",
      "        [0.5228, 0.4772],\n",
      "        [0.1923, 0.8077],\n",
      "        [0.2926, 0.7074],\n",
      "        [0.2554, 0.7446],\n",
      "        [0.2399, 0.7601],\n",
      "        [0.2341, 0.7659],\n",
      "        [0.3436, 0.6564]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6703071594238281\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  39%|██████████▉                 | 20/51 [00:46<01:12,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1563, 0.8437],\n",
      "        [0.9934, 0.0066],\n",
      "        [0.2255, 0.7745],\n",
      "        [0.2607, 0.7393],\n",
      "        [0.2911, 0.7089],\n",
      "        [0.4446, 0.5554],\n",
      "        [0.2266, 0.7734],\n",
      "        [0.2195, 0.7805],\n",
      "        [0.1232, 0.8768],\n",
      "        [0.8958, 0.1042],\n",
      "        [0.2260, 0.7740],\n",
      "        [0.0319, 0.9681],\n",
      "        [0.2438, 0.7562],\n",
      "        [0.2951, 0.7049],\n",
      "        [0.3208, 0.6792],\n",
      "        [0.2149, 0.7851]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7441282272338867\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  41%|███████████▌                | 21/51 [00:49<01:10,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1375, 0.8625],\n",
      "        [0.0677, 0.9323],\n",
      "        [0.2579, 0.7421],\n",
      "        [0.2382, 0.7618],\n",
      "        [0.0528, 0.9472],\n",
      "        [0.2299, 0.7701],\n",
      "        [0.3082, 0.6918],\n",
      "        [0.2759, 0.7241],\n",
      "        [0.2340, 0.7660],\n",
      "        [0.2260, 0.7740],\n",
      "        [0.2313, 0.7687],\n",
      "        [0.2400, 0.7600],\n",
      "        [0.2292, 0.7708],\n",
      "        [0.2793, 0.7207],\n",
      "        [0.2639, 0.7361],\n",
      "        [0.6885, 0.3115]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7043605446815491\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  43%|████████████                | 22/51 [00:51<01:08,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2476, 0.7524],\n",
      "        [0.2543, 0.7457],\n",
      "        [0.2335, 0.7665],\n",
      "        [0.2406, 0.7594],\n",
      "        [0.2265, 0.7735],\n",
      "        [0.2547, 0.7453],\n",
      "        [0.2408, 0.7592],\n",
      "        [0.3447, 0.6553],\n",
      "        [0.1731, 0.8269],\n",
      "        [0.2626, 0.7374],\n",
      "        [0.2260, 0.7740],\n",
      "        [0.2518, 0.7482],\n",
      "        [0.1260, 0.8740],\n",
      "        [0.1381, 0.8619],\n",
      "        [0.2339, 0.7661],\n",
      "        [0.1996, 0.8004]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.45027849078178406\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  45%|████████████▋               | 23/51 [00:53<01:06,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2582, 0.7418],\n",
      "        [0.2648, 0.7352],\n",
      "        [0.2633, 0.7367],\n",
      "        [0.1776, 0.8224],\n",
      "        [0.2378, 0.7622],\n",
      "        [0.1895, 0.8105],\n",
      "        [0.3036, 0.6964],\n",
      "        [0.2163, 0.7837],\n",
      "        [0.2770, 0.7230],\n",
      "        [0.2233, 0.7767],\n",
      "        [0.2621, 0.7379],\n",
      "        [0.4530, 0.5470],\n",
      "        [0.3363, 0.6637],\n",
      "        [0.0064, 0.9936],\n",
      "        [0.2822, 0.7178],\n",
      "        [0.2499, 0.7501]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.33713066577911377\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  47%|█████████████▏              | 24/51 [00:56<01:03,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2391, 0.7609],\n",
      "        [0.2631, 0.7369],\n",
      "        [0.1952, 0.8048],\n",
      "        [0.2293, 0.7707],\n",
      "        [0.3321, 0.6679],\n",
      "        [0.4772, 0.5228],\n",
      "        [0.2478, 0.7522],\n",
      "        [0.2094, 0.7906],\n",
      "        [0.0955, 0.9045],\n",
      "        [0.2455, 0.7545],\n",
      "        [0.2555, 0.7445],\n",
      "        [0.0806, 0.9194],\n",
      "        [0.2536, 0.7464],\n",
      "        [0.2527, 0.7473],\n",
      "        [0.2475, 0.7525],\n",
      "        [0.2284, 0.7716]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7324075102806091\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  49%|█████████████▋              | 25/51 [00:58<01:00,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2072, 0.7928],\n",
      "        [0.0124, 0.9876],\n",
      "        [0.1974, 0.8026],\n",
      "        [0.0555, 0.9445],\n",
      "        [0.2347, 0.7653],\n",
      "        [0.1936, 0.8064],\n",
      "        [0.2082, 0.7918],\n",
      "        [0.2285, 0.7715],\n",
      "        [0.0033, 0.9967],\n",
      "        [0.2725, 0.7275],\n",
      "        [0.2345, 0.7655],\n",
      "        [0.2088, 0.7912],\n",
      "        [0.2190, 0.7810],\n",
      "        [0.2027, 0.7973],\n",
      "        [0.2378, 0.7622],\n",
      "        [0.2203, 0.7797]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.45904165506362915\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  51%|██████████████▎             | 26/51 [01:00<00:58,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.5108, 0.4892],\n",
      "        [0.2201, 0.7799],\n",
      "        [0.2531, 0.7469],\n",
      "        [0.2075, 0.7925],\n",
      "        [0.2120, 0.7880],\n",
      "        [0.1490, 0.8510],\n",
      "        [0.2222, 0.7778],\n",
      "        [0.2282, 0.7718],\n",
      "        [0.2107, 0.7893],\n",
      "        [0.0655, 0.9345],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.9734, 0.0266],\n",
      "        [0.2314, 0.7686],\n",
      "        [0.1922, 0.8078],\n",
      "        [0.1987, 0.8013],\n",
      "        [0.2235, 0.7765]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.32595109939575195\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  53%|██████████████▊             | 27/51 [01:03<00:56,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.4782, 0.5218],\n",
      "        [0.8697, 0.1303],\n",
      "        [0.2042, 0.7958],\n",
      "        [0.2316, 0.7684],\n",
      "        [0.2392, 0.7608],\n",
      "        [0.1909, 0.8091],\n",
      "        [0.2394, 0.7606],\n",
      "        [0.2344, 0.7656],\n",
      "        [0.2430, 0.7570],\n",
      "        [0.2219, 0.7781],\n",
      "        [0.2190, 0.7810],\n",
      "        [0.1712, 0.8288],\n",
      "        [0.1269, 0.8731],\n",
      "        [0.2786, 0.7214],\n",
      "        [0.2590, 0.7410],\n",
      "        [0.4884, 0.5116]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.47762471437454224\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  55%|███████████████▎            | 28/51 [01:05<00:54,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2292, 0.7708],\n",
      "        [0.2333, 0.7667],\n",
      "        [0.2303, 0.7697],\n",
      "        [0.2410, 0.7590],\n",
      "        [0.1414, 0.8586],\n",
      "        [0.2317, 0.7683],\n",
      "        [0.1809, 0.8191],\n",
      "        [0.0222, 0.9778],\n",
      "        [0.1822, 0.8178],\n",
      "        [0.1966, 0.8034],\n",
      "        [0.3653, 0.6347],\n",
      "        [0.0079, 0.9921],\n",
      "        [0.0635, 0.9365],\n",
      "        [0.4608, 0.5392],\n",
      "        [0.2260, 0.7740],\n",
      "        [0.2362, 0.7638]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.43979984521865845\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  57%|███████████████▉            | 29/51 [01:07<00:52,  2.37s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2577, 0.7423],\n",
      "        [0.2550, 0.7450],\n",
      "        [0.1817, 0.8183],\n",
      "        [0.1439, 0.8561],\n",
      "        [0.2237, 0.7763],\n",
      "        [0.1814, 0.8186],\n",
      "        [0.1920, 0.8080],\n",
      "        [0.1911, 0.8089],\n",
      "        [0.3155, 0.6845],\n",
      "        [0.0802, 0.9198],\n",
      "        [0.2491, 0.7509],\n",
      "        [0.2063, 0.7937],\n",
      "        [0.1646, 0.8354],\n",
      "        [0.0640, 0.9360],\n",
      "        [0.2484, 0.7516],\n",
      "        [0.1811, 0.8189]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5306289196014404\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  59%|████████████████▍           | 30/51 [01:10<00:50,  2.39s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2134, 0.7866],\n",
      "        [0.1910, 0.8090],\n",
      "        [0.2724, 0.7276],\n",
      "        [0.2144, 0.7856],\n",
      "        [0.2112, 0.7888],\n",
      "        [0.2102, 0.7898],\n",
      "        [0.1992, 0.8008],\n",
      "        [0.2731, 0.7269],\n",
      "        [0.2102, 0.7898],\n",
      "        [0.1336, 0.8664],\n",
      "        [0.2167, 0.7833],\n",
      "        [0.2352, 0.7648],\n",
      "        [0.2136, 0.7864],\n",
      "        [0.1661, 0.8339],\n",
      "        [0.2112, 0.7888],\n",
      "        [0.2170, 0.7830]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5240678191184998\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  61%|█████████████████           | 31/51 [01:12<00:48,  2.40s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.6917, 0.3083],\n",
      "        [0.2507, 0.7493],\n",
      "        [0.2259, 0.7741],\n",
      "        [0.1809, 0.8191],\n",
      "        [0.1357, 0.8643],\n",
      "        [0.2263, 0.7737],\n",
      "        [0.2373, 0.7627],\n",
      "        [0.2576, 0.7424],\n",
      "        [0.2257, 0.7743],\n",
      "        [0.1730, 0.8270],\n",
      "        [0.2250, 0.7750],\n",
      "        [0.2303, 0.7697],\n",
      "        [0.2003, 0.7997],\n",
      "        [0.2041, 0.7959],\n",
      "        [0.1202, 0.8798],\n",
      "        [0.2049, 0.7951]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5179411768913269\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  63%|█████████████████▌          | 32/51 [01:15<00:45,  2.39s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.3345, 0.6655],\n",
      "        [0.2005, 0.7995],\n",
      "        [0.2037, 0.7963],\n",
      "        [0.1908, 0.8092],\n",
      "        [0.2129, 0.7871],\n",
      "        [0.2011, 0.7989],\n",
      "        [0.2134, 0.7866],\n",
      "        [0.2188, 0.7812],\n",
      "        [0.2314, 0.7686],\n",
      "        [0.2188, 0.7812],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.0443, 0.9557],\n",
      "        [0.1693, 0.8307],\n",
      "        [0.2259, 0.7741],\n",
      "        [0.2109, 0.7891],\n",
      "        [0.2024, 0.7976]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6089388728141785\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  65%|██████████████████          | 33/51 [01:17<00:43,  2.40s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2193, 0.7807],\n",
      "        [0.1754, 0.8246],\n",
      "        [0.2222, 0.7778],\n",
      "        [0.2353, 0.7647],\n",
      "        [0.2233, 0.7767],\n",
      "        [0.2241, 0.7759],\n",
      "        [0.2356, 0.7644],\n",
      "        [0.3950, 0.6050],\n",
      "        [0.2248, 0.7752],\n",
      "        [0.0177, 0.9823],\n",
      "        [0.1817, 0.8183],\n",
      "        [0.1295, 0.8705],\n",
      "        [0.2291, 0.7709],\n",
      "        [0.2235, 0.7765],\n",
      "        [0.2664, 0.7336],\n",
      "        [0.1116, 0.8884]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.84483802318573\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  67%|██████████████████▋         | 34/51 [01:19<00:40,  2.40s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2437, 0.7563],\n",
      "        [0.9920, 0.0080],\n",
      "        [0.2193, 0.7807],\n",
      "        [0.2144, 0.7856],\n",
      "        [0.1619, 0.8381],\n",
      "        [0.0229, 0.9771],\n",
      "        [0.1446, 0.8554],\n",
      "        [0.0902, 0.9098],\n",
      "        [0.2245, 0.7755],\n",
      "        [0.1493, 0.8507],\n",
      "        [0.3765, 0.6235],\n",
      "        [0.2222, 0.7778],\n",
      "        [0.2156, 0.7844],\n",
      "        [0.2444, 0.7556],\n",
      "        [0.4635, 0.5365],\n",
      "        [0.2197, 0.7803]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6579245328903198\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  69%|███████████████████▏        | 35/51 [01:22<00:38,  2.42s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2209, 0.7791],\n",
      "        [0.1801, 0.8199],\n",
      "        [0.2373, 0.7627],\n",
      "        [0.1539, 0.8461],\n",
      "        [0.0188, 0.9812],\n",
      "        [0.2625, 0.7375],\n",
      "        [0.1868, 0.8132],\n",
      "        [0.1690, 0.8310],\n",
      "        [0.1450, 0.8550],\n",
      "        [0.2141, 0.7859],\n",
      "        [0.0970, 0.9030],\n",
      "        [0.2261, 0.7739],\n",
      "        [0.2260, 0.7740],\n",
      "        [0.2744, 0.7256],\n",
      "        [0.2172, 0.7828],\n",
      "        [0.2294, 0.7706]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5748203992843628\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  71%|███████████████████▊        | 36/51 [01:24<00:36,  2.42s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.1768e-01, 7.8232e-01],\n",
      "        [2.2817e-01, 7.7183e-01],\n",
      "        [1.4159e-01, 8.5841e-01],\n",
      "        [5.0901e-02, 9.4910e-01],\n",
      "        [2.3537e-01, 7.6463e-01],\n",
      "        [2.4155e-01, 7.5845e-01],\n",
      "        [8.3286e-04, 9.9917e-01],\n",
      "        [3.4530e-01, 6.5470e-01],\n",
      "        [2.2481e-01, 7.7519e-01],\n",
      "        [2.0511e-01, 7.9489e-01],\n",
      "        [2.0539e-01, 7.9461e-01],\n",
      "        [2.2865e-01, 7.7135e-01],\n",
      "        [2.3645e-01, 7.6355e-01],\n",
      "        [1.3729e-01, 8.6271e-01],\n",
      "        [2.3402e-01, 7.6598e-01],\n",
      "        [2.1496e-01, 7.8504e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.49918586015701294\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  73%|████████████████████▎       | 37/51 [01:27<00:33,  2.38s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2522, 0.7478],\n",
      "        [0.2288, 0.7712],\n",
      "        [0.1995, 0.8005],\n",
      "        [0.1900, 0.8100],\n",
      "        [0.2389, 0.7611],\n",
      "        [0.2067, 0.7933],\n",
      "        [0.2478, 0.7522],\n",
      "        [0.2718, 0.7282],\n",
      "        [0.2378, 0.7622],\n",
      "        [0.2048, 0.7952],\n",
      "        [0.2356, 0.7644],\n",
      "        [0.2342, 0.7658],\n",
      "        [0.2140, 0.7860],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.2458, 0.7542],\n",
      "        [0.2361, 0.7639]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.48142606019973755\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  75%|████████████████████▊       | 38/51 [01:29<00:30,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2316, 0.7684],\n",
      "        [0.1877, 0.8123],\n",
      "        [0.2001, 0.7999],\n",
      "        [0.2307, 0.7693],\n",
      "        [0.2663, 0.7337],\n",
      "        [0.5538, 0.4462],\n",
      "        [0.2171, 0.7829],\n",
      "        [0.2369, 0.7631],\n",
      "        [0.2340, 0.7660],\n",
      "        [0.2113, 0.7887],\n",
      "        [0.2327, 0.7673],\n",
      "        [0.0656, 0.9344],\n",
      "        [0.2256, 0.7744],\n",
      "        [0.1711, 0.8289],\n",
      "        [0.2336, 0.7664],\n",
      "        [0.1531, 0.8469]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3284091055393219\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  76%|█████████████████████▍      | 39/51 [01:31<00:27,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2145, 0.7855],\n",
      "        [0.2401, 0.7599],\n",
      "        [0.0462, 0.9538],\n",
      "        [0.4902, 0.5098],\n",
      "        [0.2164, 0.7836],\n",
      "        [0.2598, 0.7402],\n",
      "        [0.1806, 0.8194],\n",
      "        [0.0425, 0.9575],\n",
      "        [0.1068, 0.8932],\n",
      "        [0.2372, 0.7628],\n",
      "        [0.2312, 0.7688],\n",
      "        [0.2632, 0.7368],\n",
      "        [0.2422, 0.7578],\n",
      "        [0.2351, 0.7649],\n",
      "        [0.6404, 0.3596],\n",
      "        [0.2350, 0.7650]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4395548701286316\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  78%|█████████████████████▉      | 40/51 [01:33<00:25,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1739, 0.8261],\n",
      "        [0.2943, 0.7057],\n",
      "        [0.0053, 0.9947],\n",
      "        [0.0984, 0.9016],\n",
      "        [0.5738, 0.4262],\n",
      "        [0.2600, 0.7400],\n",
      "        [0.2336, 0.7664],\n",
      "        [0.2214, 0.7786],\n",
      "        [0.1515, 0.8485],\n",
      "        [0.3350, 0.6650],\n",
      "        [0.2005, 0.7995],\n",
      "        [0.4863, 0.5137],\n",
      "        [0.2275, 0.7725],\n",
      "        [0.2240, 0.7760],\n",
      "        [0.1362, 0.8638],\n",
      "        [0.3771, 0.6229]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.36485040187835693\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  80%|██████████████████████▌     | 41/51 [01:36<00:22,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2182, 0.7818],\n",
      "        [0.9274, 0.0726],\n",
      "        [0.0623, 0.9377],\n",
      "        [0.2279, 0.7721],\n",
      "        [0.1053, 0.8947],\n",
      "        [0.1345, 0.8655],\n",
      "        [0.2291, 0.7709],\n",
      "        [0.2180, 0.7820],\n",
      "        [0.2282, 0.7718],\n",
      "        [0.0463, 0.9537],\n",
      "        [0.3005, 0.6995],\n",
      "        [0.2284, 0.7716],\n",
      "        [0.2001, 0.7999],\n",
      "        [0.2309, 0.7691],\n",
      "        [0.3728, 0.6272],\n",
      "        [0.2344, 0.7656]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.729299783706665\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  82%|███████████████████████     | 42/51 [01:38<00:20,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2492, 0.7508],\n",
      "        [0.2225, 0.7775],\n",
      "        [0.3071, 0.6929],\n",
      "        [0.2335, 0.7665],\n",
      "        [0.2288, 0.7712],\n",
      "        [0.0032, 0.9968],\n",
      "        [0.0553, 0.9447],\n",
      "        [0.2020, 0.7980],\n",
      "        [0.2149, 0.7851],\n",
      "        [0.2286, 0.7714],\n",
      "        [0.2458, 0.7542],\n",
      "        [0.2126, 0.7874],\n",
      "        [0.1978, 0.8022],\n",
      "        [0.4324, 0.5676],\n",
      "        [0.2988, 0.7012],\n",
      "        [0.2475, 0.7525]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4990721344947815\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  84%|███████████████████████▌    | 43/51 [01:40<00:18,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0888, 0.9112],\n",
      "        [0.2274, 0.7726],\n",
      "        [0.2769, 0.7231],\n",
      "        [0.1519, 0.8481],\n",
      "        [0.2066, 0.7934],\n",
      "        [0.2088, 0.7912],\n",
      "        [0.1944, 0.8056],\n",
      "        [0.0069, 0.9931],\n",
      "        [0.2460, 0.7540],\n",
      "        [0.2321, 0.7679],\n",
      "        [0.2300, 0.7700],\n",
      "        [0.2077, 0.7923],\n",
      "        [0.2027, 0.7973],\n",
      "        [0.2334, 0.7666],\n",
      "        [0.7603, 0.2397],\n",
      "        [0.2327, 0.7673]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5922718644142151\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  86%|████████████████████████▏   | 44/51 [01:43<00:15,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2257, 0.7743],\n",
      "        [0.2220, 0.7780],\n",
      "        [0.2369, 0.7631],\n",
      "        [0.2324, 0.7676],\n",
      "        [0.2334, 0.7666],\n",
      "        [0.2244, 0.7756],\n",
      "        [0.2235, 0.7765],\n",
      "        [0.4652, 0.5348],\n",
      "        [0.1052, 0.8948],\n",
      "        [0.3062, 0.6938],\n",
      "        [0.2377, 0.7623],\n",
      "        [0.2453, 0.7547],\n",
      "        [0.2602, 0.7398],\n",
      "        [0.2383, 0.7617],\n",
      "        [0.0926, 0.9074],\n",
      "        [0.6626, 0.3374]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.6487093567848206\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  88%|████████████████████████▋   | 45/51 [01:45<00:13,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.5770, 0.4230],\n",
      "        [0.2261, 0.7739],\n",
      "        [0.2643, 0.7357],\n",
      "        [0.3699, 0.6301],\n",
      "        [0.1457, 0.8543],\n",
      "        [0.2312, 0.7688],\n",
      "        [0.1865, 0.8135],\n",
      "        [0.2313, 0.7687],\n",
      "        [0.2808, 0.7192],\n",
      "        [0.7193, 0.2807],\n",
      "        [0.1066, 0.8934],\n",
      "        [0.2382, 0.7618],\n",
      "        [0.2184, 0.7816],\n",
      "        [0.2214, 0.7786],\n",
      "        [0.2088, 0.7912],\n",
      "        [0.1643, 0.8357]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.418768048286438\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  90%|█████████████████████████▎  | 46/51 [01:47<00:11,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2263, 0.7737],\n",
      "        [0.3312, 0.6688],\n",
      "        [0.3936, 0.6064],\n",
      "        [0.2332, 0.7668],\n",
      "        [0.0608, 0.9392],\n",
      "        [0.2287, 0.7713],\n",
      "        [0.2126, 0.7874],\n",
      "        [0.2337, 0.7663],\n",
      "        [0.1295, 0.8705],\n",
      "        [0.2204, 0.7796],\n",
      "        [0.1918, 0.8082],\n",
      "        [0.2226, 0.7774],\n",
      "        [0.2266, 0.7734],\n",
      "        [0.1839, 0.8161],\n",
      "        [0.2249, 0.7751],\n",
      "        [0.2070, 0.7930]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6344403624534607\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  92%|█████████████████████████▊  | 47/51 [01:49<00:09,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2663, 0.7337],\n",
      "        [0.2099, 0.7901],\n",
      "        [0.3693, 0.6307],\n",
      "        [0.6747, 0.3253],\n",
      "        [0.2709, 0.7291],\n",
      "        [0.2553, 0.7447],\n",
      "        [0.2214, 0.7786],\n",
      "        [0.2268, 0.7732],\n",
      "        [0.2511, 0.7489],\n",
      "        [0.2384, 0.7616],\n",
      "        [0.1340, 0.8660],\n",
      "        [0.2457, 0.7543],\n",
      "        [0.2384, 0.7616],\n",
      "        [0.0303, 0.9697],\n",
      "        [0.0850, 0.9150],\n",
      "        [0.2682, 0.7318]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5392235517501831\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  94%|██████████████████████████▎ | 48/51 [01:52<00:06,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2258, 0.7742],\n",
      "        [0.2752, 0.7248],\n",
      "        [0.9920, 0.0080],\n",
      "        [0.0619, 0.9381],\n",
      "        [0.9955, 0.0045],\n",
      "        [0.2944, 0.7056],\n",
      "        [0.0650, 0.9350],\n",
      "        [0.2079, 0.7921],\n",
      "        [0.2437, 0.7563],\n",
      "        [0.2406, 0.7594],\n",
      "        [0.2501, 0.7499],\n",
      "        [0.3079, 0.6921],\n",
      "        [0.2867, 0.7133],\n",
      "        [0.1746, 0.8254],\n",
      "        [0.2660, 0.7340],\n",
      "        [0.2313, 0.7687]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4200262427330017\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  96%|██████████████████████████▉ | 49/51 [01:54<00:04,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.5564, 0.4436],\n",
      "        [0.2651, 0.7349],\n",
      "        [0.2497, 0.7503],\n",
      "        [0.1203, 0.8797],\n",
      "        [0.3099, 0.6901],\n",
      "        [0.2679, 0.7321],\n",
      "        [0.2494, 0.7506],\n",
      "        [0.2427, 0.7573],\n",
      "        [0.3539, 0.6461],\n",
      "        [0.4051, 0.5949],\n",
      "        [0.2645, 0.7355],\n",
      "        [0.2317, 0.7683],\n",
      "        [0.2469, 0.7531],\n",
      "        [0.2493, 0.7507],\n",
      "        [0.2578, 0.7422],\n",
      "        [0.1948, 0.8052]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5375463962554932\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10:  98%|███████████████████████████▍| 50/51 [01:56<00:02,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1738, 0.8262],\n",
      "        [0.2977, 0.7023]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.7013231515884399\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/10: 100%|████████████████████████████| 51/51 [01:57<00:00,  2.30s/batch]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "total_loss:27.405659824609756\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:   0%|                                     | 0/51 [00:00<?, ?batch/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2467, 0.7533],\n",
      "        [0.2485, 0.7515],\n",
      "        [0.2358, 0.7642],\n",
      "        [0.2355, 0.7645],\n",
      "        [0.2775, 0.7225],\n",
      "        [0.2542, 0.7458],\n",
      "        [0.2199, 0.7801],\n",
      "        [0.3168, 0.6832],\n",
      "        [0.5204, 0.4796],\n",
      "        [0.2967, 0.7033],\n",
      "        [0.2928, 0.7072],\n",
      "        [0.8146, 0.1854],\n",
      "        [0.0500, 0.9500],\n",
      "        [0.2398, 0.7602],\n",
      "        [0.2824, 0.7176],\n",
      "        [0.2701, 0.7299]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.43941736221313477\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:   2%|▌                            | 1/51 [00:02<01:59,  2.38s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.2964e-01, 7.7036e-01],\n",
      "        [2.1804e-01, 7.8196e-01],\n",
      "        [3.3364e-01, 6.6636e-01],\n",
      "        [3.0521e-01, 6.9479e-01],\n",
      "        [2.3468e-01, 7.6532e-01],\n",
      "        [2.2190e-01, 7.7810e-01],\n",
      "        [2.3368e-01, 7.6632e-01],\n",
      "        [4.0491e-01, 5.9509e-01],\n",
      "        [4.2575e-04, 9.9957e-01],\n",
      "        [3.2271e-01, 6.7729e-01],\n",
      "        [2.6988e-01, 7.3012e-01],\n",
      "        [7.1806e-01, 2.8194e-01],\n",
      "        [2.1735e-01, 7.8265e-01],\n",
      "        [8.0982e-01, 1.9018e-01],\n",
      "        [4.3929e-01, 5.6071e-01],\n",
      "        [2.8201e-01, 7.1799e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.44546082615852356\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:   4%|█▏                           | 2/51 [00:04<01:54,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2471, 0.7529],\n",
      "        [0.3355, 0.6645],\n",
      "        [0.9977, 0.0023],\n",
      "        [0.3189, 0.6811],\n",
      "        [0.3434, 0.6566],\n",
      "        [0.2590, 0.7410],\n",
      "        [0.2540, 0.7460],\n",
      "        [0.6603, 0.3397],\n",
      "        [0.2877, 0.7123],\n",
      "        [0.2492, 0.7508],\n",
      "        [0.2677, 0.7323],\n",
      "        [0.3032, 0.6968],\n",
      "        [0.3104, 0.6896],\n",
      "        [0.2643, 0.7357],\n",
      "        [0.2706, 0.7294],\n",
      "        [0.2417, 0.7583]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.6177018880844116\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:   6%|█▋                           | 3/51 [00:07<01:51,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2448, 0.7552],\n",
      "        [0.9523, 0.0477],\n",
      "        [0.3346, 0.6654],\n",
      "        [0.4372, 0.5628],\n",
      "        [0.2121, 0.7879],\n",
      "        [0.5669, 0.4331],\n",
      "        [0.2252, 0.7748],\n",
      "        [0.3474, 0.6526],\n",
      "        [0.1685, 0.8315],\n",
      "        [0.2737, 0.7263],\n",
      "        [0.2020, 0.7980],\n",
      "        [0.3541, 0.6459],\n",
      "        [0.2656, 0.7344],\n",
      "        [0.2462, 0.7538],\n",
      "        [0.3537, 0.6463],\n",
      "        [0.2314, 0.7686]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5015270113945007\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:   8%|██▎                          | 4/51 [00:09<01:49,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.3374, 0.6626],\n",
      "        [0.8828, 0.1172],\n",
      "        [0.2175, 0.7825],\n",
      "        [0.2104, 0.7896],\n",
      "        [0.2515, 0.7485],\n",
      "        [0.2300, 0.7700],\n",
      "        [0.2167, 0.7833],\n",
      "        [0.1872, 0.8128],\n",
      "        [0.2530, 0.7470],\n",
      "        [0.1525, 0.8475],\n",
      "        [0.3516, 0.6484],\n",
      "        [0.3406, 0.6594],\n",
      "        [0.2305, 0.7695],\n",
      "        [0.3110, 0.6890],\n",
      "        [0.3563, 0.6437],\n",
      "        [0.2065, 0.7935]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4864295721054077\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  10%|██▊                          | 5/51 [00:11<01:46,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.3901, 0.6099],\n",
      "        [0.4965, 0.5035],\n",
      "        [0.3278, 0.6722],\n",
      "        [0.2555, 0.7445],\n",
      "        [0.2530, 0.7470],\n",
      "        [0.3066, 0.6934],\n",
      "        [0.2466, 0.7534],\n",
      "        [0.2929, 0.7071],\n",
      "        [0.3376, 0.6624],\n",
      "        [0.2827, 0.7173],\n",
      "        [0.2565, 0.7435],\n",
      "        [0.2801, 0.7199],\n",
      "        [0.3098, 0.6902],\n",
      "        [0.2945, 0.7055],\n",
      "        [0.2770, 0.7230],\n",
      "        [0.3100, 0.6900]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6020658612251282\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  12%|███▍                         | 6/51 [00:13<01:43,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1955, 0.8045],\n",
      "        [0.2905, 0.7095],\n",
      "        [0.2270, 0.7730],\n",
      "        [0.2315, 0.7685],\n",
      "        [0.2272, 0.7728],\n",
      "        [0.2466, 0.7534],\n",
      "        [0.3313, 0.6687],\n",
      "        [0.2810, 0.7190],\n",
      "        [0.2199, 0.7801],\n",
      "        [0.2248, 0.7752],\n",
      "        [0.3147, 0.6853],\n",
      "        [0.0992, 0.9008],\n",
      "        [0.5935, 0.4065],\n",
      "        [0.9873, 0.0127],\n",
      "        [0.3078, 0.6922],\n",
      "        [0.2968, 0.7032]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.6297481060028076\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  14%|███▉                         | 7/51 [00:16<01:41,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2961, 0.7039],\n",
      "        [0.1856, 0.8144],\n",
      "        [0.2303, 0.7697],\n",
      "        [0.2572, 0.7428],\n",
      "        [0.3014, 0.6986],\n",
      "        [0.2365, 0.7635],\n",
      "        [0.3097, 0.6903],\n",
      "        [0.2388, 0.7612],\n",
      "        [0.5206, 0.4794],\n",
      "        [0.3180, 0.6820],\n",
      "        [0.2914, 0.7086],\n",
      "        [0.3139, 0.6861],\n",
      "        [0.3063, 0.6937],\n",
      "        [0.1890, 0.8110],\n",
      "        [0.2341, 0.7659],\n",
      "        [0.1391, 0.8609]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6427366137504578\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  16%|████▌                        | 8/51 [00:18<01:40,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.4480, 0.5520],\n",
      "        [0.2067, 0.7933],\n",
      "        [0.9628, 0.0372],\n",
      "        [0.2379, 0.7621],\n",
      "        [0.3294, 0.6706],\n",
      "        [0.2479, 0.7521],\n",
      "        [0.2349, 0.7651],\n",
      "        [0.3108, 0.6892],\n",
      "        [0.2734, 0.7266],\n",
      "        [0.1996, 0.8004],\n",
      "        [0.3720, 0.6280],\n",
      "        [0.4050, 0.5950],\n",
      "        [0.1992, 0.8008],\n",
      "        [0.2382, 0.7618],\n",
      "        [0.2506, 0.7494],\n",
      "        [0.2725, 0.7275]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.52015221118927\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  18%|█████                        | 9/51 [00:20<01:38,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2884, 0.7116],\n",
      "        [0.2078, 0.7922],\n",
      "        [0.2423, 0.7577],\n",
      "        [0.2508, 0.7492],\n",
      "        [0.2532, 0.7468],\n",
      "        [0.2654, 0.7346],\n",
      "        [0.2950, 0.7050],\n",
      "        [0.2722, 0.7278],\n",
      "        [0.2649, 0.7351],\n",
      "        [0.2337, 0.7663],\n",
      "        [0.3138, 0.6862],\n",
      "        [0.2514, 0.7486],\n",
      "        [0.2845, 0.7155],\n",
      "        [0.2370, 0.7630],\n",
      "        [0.2235, 0.7765],\n",
      "        [0.1817, 0.8183]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3671954274177551\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  20%|█████▍                      | 10/51 [00:23<01:35,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2348, 0.7652],\n",
      "        [0.2491, 0.7509],\n",
      "        [0.2252, 0.7748],\n",
      "        [0.2254, 0.7746],\n",
      "        [0.0994, 0.9006],\n",
      "        [0.2746, 0.7254],\n",
      "        [0.2365, 0.7635],\n",
      "        [0.2445, 0.7555],\n",
      "        [0.0061, 0.9939],\n",
      "        [0.8854, 0.1146],\n",
      "        [0.2404, 0.7596],\n",
      "        [0.2323, 0.7677],\n",
      "        [0.2432, 0.7568],\n",
      "        [0.2282, 0.7718],\n",
      "        [0.2984, 0.7016],\n",
      "        [0.2626, 0.7374]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.498071551322937\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  22%|██████                      | 11/51 [00:25<01:33,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0155, 0.9845],\n",
      "        [0.4035, 0.5965],\n",
      "        [0.2295, 0.7705],\n",
      "        [0.2342, 0.7658],\n",
      "        [0.2212, 0.7788],\n",
      "        [0.2702, 0.7298],\n",
      "        [0.0065, 0.9935],\n",
      "        [0.2630, 0.7370],\n",
      "        [0.2537, 0.7463],\n",
      "        [0.4343, 0.5657],\n",
      "        [0.2193, 0.7807],\n",
      "        [0.1502, 0.8498],\n",
      "        [0.2506, 0.7494],\n",
      "        [0.3035, 0.6965],\n",
      "        [0.2438, 0.7562],\n",
      "        [0.2196, 0.7804]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.42355144023895264\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  24%|██████▌                     | 12/51 [00:27<01:30,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1139, 0.8861],\n",
      "        [0.4858, 0.5142],\n",
      "        [0.0916, 0.9084],\n",
      "        [0.1897, 0.8103],\n",
      "        [0.2277, 0.7723],\n",
      "        [0.2197, 0.7803],\n",
      "        [0.2040, 0.7960],\n",
      "        [0.2691, 0.7309],\n",
      "        [0.2247, 0.7753],\n",
      "        [0.2477, 0.7523],\n",
      "        [0.1617, 0.8383],\n",
      "        [0.1929, 0.8071],\n",
      "        [0.2238, 0.7762],\n",
      "        [0.0645, 0.9355],\n",
      "        [0.8140, 0.1860],\n",
      "        [0.1847, 0.8153]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.566707968711853\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  25%|███████▏                    | 13/51 [00:30<01:27,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2120, 0.7880],\n",
      "        [0.9339, 0.0661],\n",
      "        [0.2353, 0.7647],\n",
      "        [0.2204, 0.7796],\n",
      "        [0.3721, 0.6279],\n",
      "        [0.2226, 0.7774],\n",
      "        [0.3979, 0.6021],\n",
      "        [0.1547, 0.8453],\n",
      "        [0.4321, 0.5679],\n",
      "        [0.2221, 0.7779],\n",
      "        [0.2242, 0.7758],\n",
      "        [0.0283, 0.9717],\n",
      "        [0.2112, 0.7888],\n",
      "        [0.0812, 0.9188],\n",
      "        [0.2043, 0.7957],\n",
      "        [0.1477, 0.8523]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5975692868232727\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  27%|███████▋                    | 14/51 [00:32<01:25,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2249, 0.7751],\n",
      "        [0.0261, 0.9739],\n",
      "        [0.0549, 0.9451],\n",
      "        [0.0457, 0.9543],\n",
      "        [0.2236, 0.7764],\n",
      "        [0.2163, 0.7837],\n",
      "        [0.2308, 0.7692],\n",
      "        [0.2211, 0.7789],\n",
      "        [0.2348, 0.7652],\n",
      "        [0.0538, 0.9462],\n",
      "        [0.2111, 0.7889],\n",
      "        [0.2302, 0.7698],\n",
      "        [0.4991, 0.5009],\n",
      "        [0.2542, 0.7458],\n",
      "        [0.2033, 0.7967],\n",
      "        [0.2746, 0.7254]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.46460771560668945\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  29%|████████▏                   | 15/51 [00:34<01:23,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2158, 0.7842],\n",
      "        [0.7263, 0.2737],\n",
      "        [0.1435, 0.8565],\n",
      "        [0.2333, 0.7667],\n",
      "        [0.2277, 0.7723],\n",
      "        [0.2684, 0.7316],\n",
      "        [0.1991, 0.8009],\n",
      "        [0.2545, 0.7455],\n",
      "        [0.2336, 0.7664],\n",
      "        [0.2015, 0.7985],\n",
      "        [0.0828, 0.9172],\n",
      "        [0.2014, 0.7986],\n",
      "        [0.2501, 0.7499],\n",
      "        [0.0131, 0.9869],\n",
      "        [0.2366, 0.7634],\n",
      "        [0.1768, 0.8232]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5308484435081482\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  31%|████████▊                   | 16/51 [00:37<01:21,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2274, 0.7726],\n",
      "        [0.2745, 0.7255],\n",
      "        [0.2348, 0.7652],\n",
      "        [0.2517, 0.7483],\n",
      "        [0.2458, 0.7542],\n",
      "        [0.2266, 0.7734],\n",
      "        [0.2433, 0.7567],\n",
      "        [0.1988, 0.8012],\n",
      "        [0.2476, 0.7524],\n",
      "        [0.2326, 0.7674],\n",
      "        [0.2307, 0.7693],\n",
      "        [0.2315, 0.7685],\n",
      "        [0.1141, 0.8859],\n",
      "        [0.2250, 0.7750],\n",
      "        [0.1675, 0.8325],\n",
      "        [0.3286, 0.6714]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.2630177140235901\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  33%|█████████▎                  | 17/51 [00:39<01:19,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2636, 0.7364],\n",
      "        [0.2228, 0.7772],\n",
      "        [0.2198, 0.7802],\n",
      "        [0.3930, 0.6070],\n",
      "        [0.0884, 0.9116],\n",
      "        [0.0742, 0.9258],\n",
      "        [0.2346, 0.7654],\n",
      "        [0.2282, 0.7718],\n",
      "        [0.3939, 0.6061],\n",
      "        [0.2373, 0.7627],\n",
      "        [0.2390, 0.7610],\n",
      "        [0.1957, 0.8043],\n",
      "        [0.2533, 0.7467],\n",
      "        [0.3720, 0.6280],\n",
      "        [0.2527, 0.7473],\n",
      "        [0.2289, 0.7711]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5663015246391296\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  35%|█████████▉                  | 18/51 [00:41<01:17,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2294, 0.7706],\n",
      "        [0.1638, 0.8362],\n",
      "        [0.1220, 0.8780],\n",
      "        [0.2413, 0.7587],\n",
      "        [0.1991, 0.8009],\n",
      "        [0.2436, 0.7564],\n",
      "        [0.2918, 0.7082],\n",
      "        [0.3196, 0.6804],\n",
      "        [0.2455, 0.7545],\n",
      "        [0.5548, 0.4452],\n",
      "        [0.2414, 0.7586],\n",
      "        [0.3061, 0.6939],\n",
      "        [0.2432, 0.7568],\n",
      "        [0.0148, 0.9852],\n",
      "        [0.1727, 0.8273],\n",
      "        [0.2596, 0.7404]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.4961256980895996\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  37%|██████████▍                 | 19/51 [00:44<01:14,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1304, 0.8696],\n",
      "        [0.2683, 0.7317],\n",
      "        [0.2656, 0.7344],\n",
      "        [0.2286, 0.7714],\n",
      "        [0.2249, 0.7751],\n",
      "        [0.8389, 0.1611],\n",
      "        [0.0076, 0.9924],\n",
      "        [0.1793, 0.8207],\n",
      "        [0.2473, 0.7527],\n",
      "        [0.2805, 0.7195],\n",
      "        [0.4470, 0.5530],\n",
      "        [0.1659, 0.8341],\n",
      "        [0.2848, 0.7152],\n",
      "        [0.2029, 0.7971],\n",
      "        [0.2478, 0.7522],\n",
      "        [0.2326, 0.7674]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4934476912021637\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  39%|██████████▉                 | 20/51 [00:46<01:11,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2732, 0.7268],\n",
      "        [0.1075, 0.8925],\n",
      "        [0.2480, 0.7520],\n",
      "        [0.2357, 0.7643],\n",
      "        [0.2492, 0.7508],\n",
      "        [0.2676, 0.7324],\n",
      "        [0.2706, 0.7294],\n",
      "        [0.2486, 0.7514],\n",
      "        [0.2396, 0.7604],\n",
      "        [0.2266, 0.7734],\n",
      "        [0.2487, 0.7513],\n",
      "        [0.9298, 0.0702],\n",
      "        [0.1875, 0.8125],\n",
      "        [0.2142, 0.7858],\n",
      "        [0.3061, 0.6939],\n",
      "        [0.2784, 0.7216]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.35517430305480957\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  41%|███████████▌                | 21/51 [00:48<01:09,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1970, 0.8030],\n",
      "        [0.2321, 0.7679],\n",
      "        [0.2330, 0.7670],\n",
      "        [0.2298, 0.7702],\n",
      "        [0.2506, 0.7494],\n",
      "        [0.4563, 0.5437],\n",
      "        [0.2870, 0.7130],\n",
      "        [0.2237, 0.7763],\n",
      "        [0.2298, 0.7702],\n",
      "        [0.1364, 0.8636],\n",
      "        [0.2159, 0.7841],\n",
      "        [0.2307, 0.7693],\n",
      "        [0.3142, 0.6858],\n",
      "        [0.1140, 0.8860],\n",
      "        [0.1861, 0.8139],\n",
      "        [0.2607, 0.7393]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6265910863876343\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  43%|████████████                | 22/51 [00:51<01:06,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2415, 0.7585],\n",
      "        [0.0559, 0.9441],\n",
      "        [0.0829, 0.9171],\n",
      "        [0.2349, 0.7651],\n",
      "        [0.2807, 0.7193],\n",
      "        [0.3237, 0.6763],\n",
      "        [0.4964, 0.5036],\n",
      "        [0.2419, 0.7581],\n",
      "        [0.1605, 0.8395],\n",
      "        [0.2317, 0.7683],\n",
      "        [0.2275, 0.7725],\n",
      "        [0.1269, 0.8731],\n",
      "        [0.2448, 0.7552],\n",
      "        [0.2420, 0.7580],\n",
      "        [0.8446, 0.1554],\n",
      "        [0.3135, 0.6865]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5201822519302368\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  45%|████████████▋               | 23/51 [00:53<01:04,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2743, 0.7257],\n",
      "        [0.1654, 0.8346],\n",
      "        [0.2941, 0.7059],\n",
      "        [0.2283, 0.7717],\n",
      "        [0.2043, 0.7957],\n",
      "        [0.2575, 0.7425],\n",
      "        [0.2394, 0.7606],\n",
      "        [0.2790, 0.7210],\n",
      "        [0.2275, 0.7725],\n",
      "        [0.2448, 0.7552],\n",
      "        [0.2485, 0.7515],\n",
      "        [0.0940, 0.9060],\n",
      "        [0.3218, 0.6782],\n",
      "        [0.2366, 0.7634],\n",
      "        [0.2635, 0.7365],\n",
      "        [0.2452, 0.7548]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5331094861030579\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  47%|█████████████▏              | 24/51 [00:55<01:01,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.4633, 0.5367],\n",
      "        [0.2467, 0.7533],\n",
      "        [0.2316, 0.7684],\n",
      "        [0.2118, 0.7882],\n",
      "        [0.2552, 0.7448],\n",
      "        [0.2543, 0.7457],\n",
      "        [0.2258, 0.7742],\n",
      "        [0.2452, 0.7548],\n",
      "        [0.2927, 0.7073],\n",
      "        [0.0670, 0.9330],\n",
      "        [0.2213, 0.7787],\n",
      "        [0.2145, 0.7855],\n",
      "        [0.2073, 0.7927],\n",
      "        [0.2898, 0.7102],\n",
      "        [0.3680, 0.6320],\n",
      "        [0.2429, 0.7571]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.409366250038147\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  49%|█████████████▋              | 25/51 [00:57<00:59,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.1981e-01, 7.8019e-01],\n",
      "        [2.2494e-01, 7.7506e-01],\n",
      "        [2.9465e-04, 9.9971e-01],\n",
      "        [2.6462e-01, 7.3538e-01],\n",
      "        [2.5032e-01, 7.4968e-01],\n",
      "        [9.7929e-01, 2.0714e-02],\n",
      "        [5.8739e-01, 4.1261e-01],\n",
      "        [1.3697e-01, 8.6303e-01],\n",
      "        [2.3744e-01, 7.6256e-01],\n",
      "        [2.4089e-01, 7.5911e-01],\n",
      "        [1.3626e-01, 8.6374e-01],\n",
      "        [4.4181e-01, 5.5819e-01],\n",
      "        [8.0829e-01, 1.9171e-01],\n",
      "        [2.2883e-01, 7.7117e-01],\n",
      "        [2.0600e-01, 7.9400e-01],\n",
      "        [1.8324e-01, 8.1676e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.24881696701049805\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  51%|██████████████▎             | 26/51 [01:00<00:57,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2364, 0.7636],\n",
      "        [0.2057, 0.7943],\n",
      "        [0.0062, 0.9938],\n",
      "        [0.2284, 0.7716],\n",
      "        [0.8759, 0.1241],\n",
      "        [0.2226, 0.7774],\n",
      "        [0.2049, 0.7951],\n",
      "        [0.1895, 0.8105],\n",
      "        [0.2344, 0.7656],\n",
      "        [0.2278, 0.7722],\n",
      "        [0.2580, 0.7420],\n",
      "        [0.1484, 0.8516],\n",
      "        [0.2334, 0.7666],\n",
      "        [0.2393, 0.7607],\n",
      "        [0.3835, 0.6165],\n",
      "        [0.2194, 0.7806]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.39188140630722046\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  53%|██████████████▊             | 27/51 [01:02<00:54,  2.28s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.4112, 0.5888],\n",
      "        [0.3773, 0.6227],\n",
      "        [0.3380, 0.6620],\n",
      "        [0.2482, 0.7518],\n",
      "        [0.2433, 0.7567],\n",
      "        [0.2092, 0.7908],\n",
      "        [0.6378, 0.3622],\n",
      "        [0.0751, 0.9249],\n",
      "        [0.1938, 0.8062],\n",
      "        [0.2285, 0.7715],\n",
      "        [0.1871, 0.8129],\n",
      "        [0.2379, 0.7621],\n",
      "        [0.2087, 0.7913],\n",
      "        [0.2499, 0.7501],\n",
      "        [0.2138, 0.7862],\n",
      "        [0.0037, 0.9963]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.38515201210975647\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  55%|███████████████▎            | 28/51 [01:04<00:52,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.5032e-01, 7.4968e-01],\n",
      "        [1.7229e-01, 8.2771e-01],\n",
      "        [2.4211e-01, 7.5789e-01],\n",
      "        [9.9931e-01, 6.9059e-04],\n",
      "        [2.8386e-01, 7.1614e-01],\n",
      "        [2.1713e-01, 7.8287e-01],\n",
      "        [1.9555e-02, 9.8045e-01],\n",
      "        [1.8809e-01, 8.1191e-01],\n",
      "        [2.3544e-01, 7.6456e-01],\n",
      "        [1.3225e-01, 8.6775e-01],\n",
      "        [6.1473e-01, 3.8527e-01],\n",
      "        [2.1337e-01, 7.8663e-01],\n",
      "        [2.2127e-01, 7.7873e-01],\n",
      "        [2.1767e-01, 7.8233e-01],\n",
      "        [2.1850e-01, 7.8150e-01],\n",
      "        [2.2217e-02, 9.7778e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5986227989196777\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  57%|███████████████▉            | 29/51 [01:07<00:51,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2039, 0.7961],\n",
      "        [0.1450, 0.8550],\n",
      "        [0.2194, 0.7806],\n",
      "        [0.2131, 0.7869],\n",
      "        [0.1596, 0.8404],\n",
      "        [0.1211, 0.8789],\n",
      "        [0.2087, 0.7913],\n",
      "        [0.2193, 0.7807],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.2239, 0.7761],\n",
      "        [0.2130, 0.7870],\n",
      "        [0.1497, 0.8503],\n",
      "        [0.1049, 0.8951],\n",
      "        [0.9861, 0.0139],\n",
      "        [0.2185, 0.7815],\n",
      "        [0.2301, 0.7699]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4676656126976013\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  59%|████████████████▍           | 30/51 [01:09<00:48,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1736, 0.8264],\n",
      "        [0.1741, 0.8259],\n",
      "        [0.2130, 0.7870],\n",
      "        [0.2262, 0.7738],\n",
      "        [0.0871, 0.9129],\n",
      "        [0.1683, 0.8317],\n",
      "        [0.1866, 0.8134],\n",
      "        [0.0276, 0.9724],\n",
      "        [0.2147, 0.7853],\n",
      "        [0.0573, 0.9427],\n",
      "        [0.9823, 0.0177],\n",
      "        [0.0117, 0.9883],\n",
      "        [0.2178, 0.7822],\n",
      "        [0.0681, 0.9319],\n",
      "        [0.1952, 0.8048],\n",
      "        [0.2871, 0.7129]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.3959004282951355\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  61%|█████████████████           | 31/51 [01:11<00:46,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2574, 0.7426],\n",
      "        [0.2174, 0.7826],\n",
      "        [0.1959, 0.8041],\n",
      "        [0.2599, 0.7401],\n",
      "        [0.2440, 0.7560],\n",
      "        [0.4626, 0.5374],\n",
      "        [0.2251, 0.7749],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.2198, 0.7802],\n",
      "        [0.2231, 0.7769],\n",
      "        [0.0898, 0.9102],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.7139, 0.2861],\n",
      "        [0.1973, 0.8027],\n",
      "        [0.2188, 0.7812],\n",
      "        [0.1934, 0.8066]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.43357956409454346\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  63%|█████████████████▌          | 32/51 [01:14<00:44,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2077, 0.7923],\n",
      "        [0.2241, 0.7759],\n",
      "        [0.2133, 0.7867],\n",
      "        [0.1970, 0.8030],\n",
      "        [0.2163, 0.7837],\n",
      "        [0.0019, 0.9981],\n",
      "        [0.2039, 0.7961],\n",
      "        [0.7850, 0.2150],\n",
      "        [0.1995, 0.8005],\n",
      "        [0.0603, 0.9397],\n",
      "        [0.2069, 0.7931],\n",
      "        [0.0279, 0.9721],\n",
      "        [0.2277, 0.7723],\n",
      "        [0.2114, 0.7886],\n",
      "        [0.1308, 0.8692],\n",
      "        [0.4851, 0.5149]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.388847678899765\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  65%|██████████████████          | 33/51 [01:16<00:41,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.3101, 0.6899],\n",
      "        [0.1261, 0.8739],\n",
      "        [0.1099, 0.8901],\n",
      "        [0.6524, 0.3476],\n",
      "        [0.0890, 0.9110],\n",
      "        [0.1993, 0.8007],\n",
      "        [0.2169, 0.7831],\n",
      "        [0.3898, 0.6102],\n",
      "        [0.1266, 0.8734],\n",
      "        [0.4334, 0.5666],\n",
      "        [0.2329, 0.7671],\n",
      "        [0.1895, 0.8105],\n",
      "        [0.1607, 0.8393],\n",
      "        [0.2263, 0.7737],\n",
      "        [0.2050, 0.7950],\n",
      "        [0.1449, 0.8551]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4300645589828491\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  67%|██████████████████▋         | 34/51 [01:18<00:39,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2217, 0.7783],\n",
      "        [0.2169, 0.7831],\n",
      "        [0.1700, 0.8300],\n",
      "        [0.2068, 0.7932],\n",
      "        [0.2278, 0.7722],\n",
      "        [0.2309, 0.7691],\n",
      "        [0.1682, 0.8318],\n",
      "        [0.2018, 0.7982],\n",
      "        [0.1826, 0.8174],\n",
      "        [0.0109, 0.9891],\n",
      "        [0.7325, 0.2675],\n",
      "        [0.1581, 0.8419],\n",
      "        [0.0251, 0.9749],\n",
      "        [0.1100, 0.8900],\n",
      "        [0.2370, 0.7630],\n",
      "        [0.2594, 0.7406]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3425349295139313\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  69%|███████████████████▏        | 35/51 [01:21<00:37,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2203, 0.7797],\n",
      "        [0.1936, 0.8064],\n",
      "        [0.1999, 0.8001],\n",
      "        [0.1866, 0.8134],\n",
      "        [0.2115, 0.7885],\n",
      "        [0.2017, 0.7983],\n",
      "        [0.1796, 0.8204],\n",
      "        [0.1907, 0.8093],\n",
      "        [0.2036, 0.7964],\n",
      "        [0.0313, 0.9687],\n",
      "        [0.3996, 0.6004],\n",
      "        [0.0373, 0.9627],\n",
      "        [0.1606, 0.8394],\n",
      "        [0.7815, 0.2185],\n",
      "        [0.3098, 0.6902],\n",
      "        [0.2150, 0.7850]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.35428571701049805\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  71%|███████████████████▊        | 36/51 [01:23<00:34,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2187, 0.7813],\n",
      "        [0.1003, 0.8997],\n",
      "        [0.2175, 0.7825],\n",
      "        [0.2006, 0.7994],\n",
      "        [0.2425, 0.7575],\n",
      "        [0.2166, 0.7834],\n",
      "        [0.1586, 0.8414],\n",
      "        [0.1661, 0.8339],\n",
      "        [0.2100, 0.7900],\n",
      "        [0.1551, 0.8449],\n",
      "        [0.1558, 0.8442],\n",
      "        [0.1978, 0.8022],\n",
      "        [0.1657, 0.8343],\n",
      "        [0.1504, 0.8496],\n",
      "        [0.9236, 0.0764],\n",
      "        [0.4980, 0.5020]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5252882242202759\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  73%|████████████████████▎       | 37/51 [01:25<00:32,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.6124e-01, 7.3876e-01],\n",
      "        [1.5647e-01, 8.4353e-01],\n",
      "        [1.9877e-01, 8.0123e-01],\n",
      "        [4.1260e-01, 5.8740e-01],\n",
      "        [9.0121e-04, 9.9910e-01],\n",
      "        [1.9997e-01, 8.0003e-01],\n",
      "        [3.3673e-01, 6.6327e-01],\n",
      "        [1.9463e-01, 8.0537e-01],\n",
      "        [1.6039e-02, 9.8396e-01],\n",
      "        [1.6258e-01, 8.3742e-01],\n",
      "        [1.7257e-01, 8.2743e-01],\n",
      "        [2.8776e-01, 7.1224e-01],\n",
      "        [1.7579e-01, 8.2421e-01],\n",
      "        [2.0852e-01, 7.9148e-01],\n",
      "        [1.9090e-01, 8.0910e-01],\n",
      "        [2.1556e-01, 7.8444e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.6478058695793152\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  75%|████████████████████▊       | 38/51 [01:28<00:30,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0426, 0.9574],\n",
      "        [0.5138, 0.4862],\n",
      "        [0.2116, 0.7884],\n",
      "        [0.2000, 0.8000],\n",
      "        [0.2045, 0.7955],\n",
      "        [0.1703, 0.8297],\n",
      "        [0.8258, 0.1742],\n",
      "        [0.1642, 0.8358],\n",
      "        [0.1039, 0.8961],\n",
      "        [0.2018, 0.7982],\n",
      "        [0.0046, 0.9954],\n",
      "        [0.2123, 0.7877],\n",
      "        [0.2098, 0.7902],\n",
      "        [0.1716, 0.8284],\n",
      "        [0.2767, 0.7233],\n",
      "        [0.2193, 0.7807]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.37908273935317993\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  76%|█████████████████████▍      | 39/51 [01:30<00:28,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[1.9127e-01, 8.0873e-01],\n",
      "        [8.0854e-05, 9.9992e-01],\n",
      "        [2.0283e-01, 7.9717e-01],\n",
      "        [1.9952e-01, 8.0048e-01],\n",
      "        [1.8891e-01, 8.1109e-01],\n",
      "        [1.7592e-01, 8.2408e-01],\n",
      "        [2.5130e-01, 7.4870e-01],\n",
      "        [2.0087e-01, 7.9913e-01],\n",
      "        [1.8327e-01, 8.1673e-01],\n",
      "        [1.7625e-01, 8.2375e-01],\n",
      "        [2.3209e-01, 7.6791e-01],\n",
      "        [1.4282e-02, 9.8572e-01],\n",
      "        [2.1042e-01, 7.8958e-01],\n",
      "        [3.3253e-01, 6.6747e-01],\n",
      "        [1.9275e-01, 8.0725e-01],\n",
      "        [2.1720e-02, 9.7828e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.39748209714889526\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  78%|█████████████████████▉      | 40/51 [01:32<00:25,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1964, 0.8036],\n",
      "        [0.2612, 0.7388],\n",
      "        [0.1444, 0.8556],\n",
      "        [0.1676, 0.8324],\n",
      "        [0.2059, 0.7941],\n",
      "        [0.0434, 0.9566],\n",
      "        [0.1755, 0.8245],\n",
      "        [0.1904, 0.8096],\n",
      "        [0.2286, 0.7714],\n",
      "        [0.0125, 0.9875],\n",
      "        [0.1788, 0.8212],\n",
      "        [0.2132, 0.7868],\n",
      "        [0.2260, 0.7740],\n",
      "        [0.2830, 0.7170],\n",
      "        [0.2108, 0.7892],\n",
      "        [0.1889, 0.8111]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4802275598049164\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  80%|██████████████████████▌     | 41/51 [01:35<00:23,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2083, 0.7917],\n",
      "        [0.1258, 0.8742],\n",
      "        [0.2200, 0.7800],\n",
      "        [0.0388, 0.9612],\n",
      "        [0.1830, 0.8170],\n",
      "        [0.1801, 0.8199],\n",
      "        [0.1894, 0.8106],\n",
      "        [0.8366, 0.1634],\n",
      "        [0.1859, 0.8141],\n",
      "        [0.2002, 0.7998],\n",
      "        [0.1934, 0.8066],\n",
      "        [0.2062, 0.7938],\n",
      "        [0.2036, 0.7964],\n",
      "        [0.1990, 0.8010],\n",
      "        [0.2072, 0.7928],\n",
      "        [0.1773, 0.8227]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.7703573107719421\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  82%|███████████████████████     | 42/51 [01:37<00:21,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1969, 0.8031],\n",
      "        [0.0147, 0.9853],\n",
      "        [0.0240, 0.9760],\n",
      "        [0.1631, 0.8369],\n",
      "        [0.2049, 0.7951],\n",
      "        [0.2030, 0.7970],\n",
      "        [0.2129, 0.7871],\n",
      "        [0.2147, 0.7853],\n",
      "        [0.1330, 0.8670],\n",
      "        [0.7791, 0.2209],\n",
      "        [0.2019, 0.7981],\n",
      "        [0.7825, 0.2175],\n",
      "        [0.2055, 0.7945],\n",
      "        [0.1651, 0.8349],\n",
      "        [0.1944, 0.8056],\n",
      "        [0.0863, 0.9137]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.6160556077957153\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  84%|███████████████████████▌    | 43/51 [01:40<00:19,  2.39s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.0537e-01, 7.9463e-01],\n",
      "        [4.1844e-01, 5.8156e-01],\n",
      "        [5.3703e-02, 9.4630e-01],\n",
      "        [1.9330e-01, 8.0670e-01],\n",
      "        [2.2168e-01, 7.7832e-01],\n",
      "        [2.2644e-01, 7.7356e-01],\n",
      "        [2.0322e-01, 7.9678e-01],\n",
      "        [2.1036e-01, 7.8964e-01],\n",
      "        [2.0788e-01, 7.9212e-01],\n",
      "        [7.1827e-01, 2.8173e-01],\n",
      "        [2.1141e-01, 7.8859e-01],\n",
      "        [7.5938e-05, 9.9992e-01],\n",
      "        [1.9051e-01, 8.0949e-01],\n",
      "        [2.2008e-01, 7.7992e-01],\n",
      "        [1.9030e-01, 8.0970e-01],\n",
      "        [9.3699e-02, 9.0630e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4971550405025482\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  86%|████████████████████████▏   | 44/51 [01:42<00:16,  2.38s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[1.0873e-01, 8.9127e-01],\n",
      "        [2.0691e-01, 7.9309e-01],\n",
      "        [2.2947e-01, 7.7053e-01],\n",
      "        [1.9931e-01, 8.0069e-01],\n",
      "        [2.6498e-01, 7.3502e-01],\n",
      "        [1.9348e-01, 8.0652e-01],\n",
      "        [2.1153e-01, 7.8847e-01],\n",
      "        [3.4535e-04, 9.9965e-01],\n",
      "        [1.0527e-01, 8.9473e-01],\n",
      "        [3.6058e-02, 9.6394e-01],\n",
      "        [7.7798e-02, 9.2220e-01],\n",
      "        [9.8411e-01, 1.5888e-02],\n",
      "        [2.2948e-01, 7.7052e-01],\n",
      "        [8.2393e-01, 1.7607e-01],\n",
      "        [9.1716e-01, 8.2835e-02],\n",
      "        [6.6099e-02, 9.3390e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.39230942726135254\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  88%|████████████████████████▋   | 45/51 [01:44<00:14,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2275, 0.7725],\n",
      "        [0.8842, 0.1158],\n",
      "        [0.2136, 0.7864],\n",
      "        [0.1963, 0.8037],\n",
      "        [0.0273, 0.9727],\n",
      "        [0.2064, 0.7936],\n",
      "        [0.2055, 0.7945],\n",
      "        [0.2288, 0.7712],\n",
      "        [0.1170, 0.8830],\n",
      "        [0.0416, 0.9584],\n",
      "        [0.2088, 0.7912],\n",
      "        [0.1597, 0.8403],\n",
      "        [0.2007, 0.7993],\n",
      "        [0.2728, 0.7272],\n",
      "        [0.2114, 0.7886],\n",
      "        [0.2060, 0.7940]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.500166118144989\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  90%|█████████████████████████▎  | 46/51 [01:47<00:11,  2.35s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2048, 0.7952],\n",
      "        [0.2365, 0.7635],\n",
      "        [0.2413, 0.7587],\n",
      "        [0.2059, 0.7941],\n",
      "        [0.1624, 0.8376],\n",
      "        [0.1887, 0.8113],\n",
      "        [0.0837, 0.9163],\n",
      "        [0.2105, 0.7895],\n",
      "        [0.2202, 0.7798],\n",
      "        [0.9893, 0.0107],\n",
      "        [0.6075, 0.3925],\n",
      "        [0.2679, 0.7321],\n",
      "        [0.3656, 0.6344],\n",
      "        [0.2212, 0.7788],\n",
      "        [0.2204, 0.7796],\n",
      "        [0.2218, 0.7782]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4360557198524475\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  92%|█████████████████████████▊  | 47/51 [01:49<00:09,  2.37s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2130, 0.7870],\n",
      "        [0.2113, 0.7887],\n",
      "        [0.0852, 0.9148],\n",
      "        [0.1645, 0.8355],\n",
      "        [0.2088, 0.7912],\n",
      "        [0.0710, 0.9290],\n",
      "        [0.2110, 0.7890],\n",
      "        [0.3459, 0.6541],\n",
      "        [0.5903, 0.4097],\n",
      "        [0.2165, 0.7835],\n",
      "        [0.0556, 0.9444],\n",
      "        [0.2027, 0.7973],\n",
      "        [0.2037, 0.7963],\n",
      "        [0.1951, 0.8049],\n",
      "        [0.3111, 0.6889],\n",
      "        [0.2604, 0.7396]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3491605520248413\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  94%|██████████████████████████▎ | 48/51 [01:51<00:07,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2396, 0.7604],\n",
      "        [0.2251, 0.7749],\n",
      "        [0.2105, 0.7895],\n",
      "        [0.2492, 0.7508],\n",
      "        [0.2349, 0.7651],\n",
      "        [0.3047, 0.6953],\n",
      "        [0.2534, 0.7466],\n",
      "        [0.2625, 0.7375],\n",
      "        [0.2498, 0.7502],\n",
      "        [0.1883, 0.8117],\n",
      "        [0.3569, 0.6431],\n",
      "        [0.2253, 0.7747],\n",
      "        [0.1998, 0.8002],\n",
      "        [0.2138, 0.7862],\n",
      "        [0.2286, 0.7714],\n",
      "        [0.2200, 0.7800]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.43130671977996826\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  96%|██████████████████████████▉ | 49/51 [01:54<00:04,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.8779, 0.1221],\n",
      "        [0.2146, 0.7854],\n",
      "        [0.1373, 0.8627],\n",
      "        [0.2133, 0.7867],\n",
      "        [0.2267, 0.7733],\n",
      "        [0.2446, 0.7554],\n",
      "        [0.2103, 0.7897],\n",
      "        [0.2152, 0.7848],\n",
      "        [0.2039, 0.7961],\n",
      "        [0.9979, 0.0021],\n",
      "        [0.1348, 0.8652],\n",
      "        [0.3027, 0.6973],\n",
      "        [0.2391, 0.7609],\n",
      "        [0.2140, 0.7860],\n",
      "        [0.2165, 0.7835],\n",
      "        [0.2969, 0.7031]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.3610721230506897\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10:  98%|███████████████████████████▍| 50/51 [01:56<00:02,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.4931, 0.5069],\n",
      "        [0.1938, 0.8062]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4612617790699005\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/10: 100%|████████████████████████████| 51/51 [01:56<00:00,  2.29s/batch]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "total_loss:24.279245853424072\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:   0%|                                     | 0/51 [00:00<?, ?batch/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2609, 0.7391],\n",
      "        [0.2477, 0.7523],\n",
      "        [0.2253, 0.7747],\n",
      "        [0.2250, 0.7750],\n",
      "        [0.1147, 0.8853],\n",
      "        [0.2413, 0.7587],\n",
      "        [0.0567, 0.9433],\n",
      "        [0.2357, 0.7643],\n",
      "        [0.2101, 0.7899],\n",
      "        [0.2339, 0.7661],\n",
      "        [0.2170, 0.7830],\n",
      "        [0.1027, 0.8973],\n",
      "        [0.0696, 0.9304],\n",
      "        [0.2100, 0.7900],\n",
      "        [0.2147, 0.7853],\n",
      "        [0.0099, 0.9901]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4744628667831421\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:   2%|▌                            | 1/51 [00:02<01:58,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2673, 0.7327],\n",
      "        [0.2141, 0.7859],\n",
      "        [0.2626, 0.7374],\n",
      "        [0.2112, 0.7888],\n",
      "        [0.2567, 0.7433],\n",
      "        [0.0252, 0.9748],\n",
      "        [0.2234, 0.7766],\n",
      "        [0.2185, 0.7815],\n",
      "        [0.2182, 0.7818],\n",
      "        [0.1381, 0.8619],\n",
      "        [0.9598, 0.0402],\n",
      "        [0.1821, 0.8179],\n",
      "        [0.2293, 0.7707],\n",
      "        [0.0314, 0.9686],\n",
      "        [0.2088, 0.7912],\n",
      "        [0.2409, 0.7591]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4203903079032898\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:   4%|█▏                           | 2/51 [00:04<01:52,  2.31s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2087, 0.7913],\n",
      "        [0.2898, 0.7102],\n",
      "        [0.7368, 0.2632],\n",
      "        [0.2117, 0.7883],\n",
      "        [0.0248, 0.9752],\n",
      "        [0.2243, 0.7757],\n",
      "        [0.2331, 0.7669],\n",
      "        [0.2148, 0.7852],\n",
      "        [0.2932, 0.7068],\n",
      "        [0.2882, 0.7118],\n",
      "        [0.0766, 0.9234],\n",
      "        [0.0632, 0.9368],\n",
      "        [0.2192, 0.7808],\n",
      "        [0.2538, 0.7462],\n",
      "        [0.2681, 0.7319],\n",
      "        [0.2798, 0.7202]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.5196369886398315\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:   6%|█▋                           | 3/51 [00:06<01:50,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.3799, 0.6201],\n",
      "        [0.2457, 0.7543],\n",
      "        [0.2185, 0.7815],\n",
      "        [0.8835, 0.1165],\n",
      "        [0.2928, 0.7072],\n",
      "        [0.1318, 0.8682],\n",
      "        [0.8712, 0.1288],\n",
      "        [0.4376, 0.5624],\n",
      "        [0.2001, 0.7999],\n",
      "        [0.1558, 0.8442],\n",
      "        [0.9961, 0.0039],\n",
      "        [0.1139, 0.8861],\n",
      "        [0.0883, 0.9117],\n",
      "        [0.1975, 0.8025],\n",
      "        [0.2697, 0.7303],\n",
      "        [0.1986, 0.8014]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4771844148635864\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:   8%|██▎                          | 4/51 [00:09<01:48,  2.30s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.2173, 0.7827],\n",
      "        [0.2295, 0.7705],\n",
      "        [0.2455, 0.7545],\n",
      "        [0.2513, 0.7487],\n",
      "        [0.6951, 0.3049],\n",
      "        [0.2800, 0.7200],\n",
      "        [0.2367, 0.7633],\n",
      "        [0.2489, 0.7511],\n",
      "        [0.2382, 0.7618],\n",
      "        [0.2326, 0.7674],\n",
      "        [0.2211, 0.7789],\n",
      "        [0.2049, 0.7951],\n",
      "        [0.0070, 0.9930],\n",
      "        [0.2230, 0.7770],\n",
      "        [0.2248, 0.7752],\n",
      "        [0.2994, 0.7006]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.]], dtype=torch.float64)\n",
      "loss:0.5827357769012451\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:  10%|██▊                          | 5/51 [00:11<01:45,  2.29s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.4117, 0.5883],\n",
      "        [0.2460, 0.7540],\n",
      "        [0.1281, 0.8719],\n",
      "        [0.8173, 0.1827],\n",
      "        [0.2129, 0.7871],\n",
      "        [0.2165, 0.7835],\n",
      "        [0.9968, 0.0032],\n",
      "        [0.3206, 0.6794],\n",
      "        [0.2659, 0.7341],\n",
      "        [0.3012, 0.6988],\n",
      "        [0.2103, 0.7897],\n",
      "        [0.1318, 0.8682],\n",
      "        [0.3182, 0.6818],\n",
      "        [0.2195, 0.7805],\n",
      "        [0.2925, 0.7075],\n",
      "        [0.2348, 0.7652]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3948347568511963\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:  12%|███▍                         | 6/51 [00:13<01:44,  2.32s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[2.2146e-01, 7.7854e-01],\n",
      "        [9.6707e-02, 9.0329e-01],\n",
      "        [2.2841e-01, 7.7159e-01],\n",
      "        [9.9995e-01, 5.1726e-05],\n",
      "        [2.1543e-01, 7.8457e-01],\n",
      "        [9.7516e-01, 2.4842e-02],\n",
      "        [2.1912e-01, 7.8088e-01],\n",
      "        [2.1988e-01, 7.8012e-01],\n",
      "        [2.4480e-01, 7.5520e-01],\n",
      "        [2.2760e-01, 7.7240e-01],\n",
      "        [2.1683e-01, 7.8317e-01],\n",
      "        [2.1609e-01, 7.8391e-01],\n",
      "        [2.1254e-01, 7.8746e-01],\n",
      "        [2.8579e-01, 7.1421e-01],\n",
      "        [8.5535e-02, 9.1447e-01],\n",
      "        [2.3927e-01, 7.6073e-01]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.3634103536605835\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:  14%|███▉                         | 7/51 [00:16<01:42,  2.33s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1984, 0.8016],\n",
      "        [0.2061, 0.7939],\n",
      "        [0.1805, 0.8195],\n",
      "        [0.3641, 0.6359],\n",
      "        [0.2310, 0.7690],\n",
      "        [0.2271, 0.7729],\n",
      "        [0.1484, 0.8516],\n",
      "        [0.1820, 0.8180],\n",
      "        [0.9788, 0.0212],\n",
      "        [0.1300, 0.8700],\n",
      "        [0.0297, 0.9703],\n",
      "        [0.1983, 0.8017],\n",
      "        [0.2157, 0.7843],\n",
      "        [0.2037, 0.7963],\n",
      "        [0.1990, 0.8010],\n",
      "        [0.0257, 0.9743]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4376178979873657\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:  16%|████▌                        | 8/51 [00:18<01:40,  2.34s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.0852, 0.9148],\n",
      "        [0.2426, 0.7574],\n",
      "        [0.2125, 0.7875],\n",
      "        [0.2152, 0.7848],\n",
      "        [0.2696, 0.7304],\n",
      "        [0.2161, 0.7839],\n",
      "        [0.1995, 0.8005],\n",
      "        [0.2421, 0.7579],\n",
      "        [0.1845, 0.8155],\n",
      "        [0.5143, 0.4857],\n",
      "        [0.9854, 0.0146],\n",
      "        [0.2195, 0.7805],\n",
      "        [0.2944, 0.7056],\n",
      "        [0.2102, 0.7898],\n",
      "        [0.1967, 0.8033],\n",
      "        [0.3462, 0.6538]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [1., 0.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.7586383819580078\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:  18%|█████                        | 9/51 [00:20<01:39,  2.36s/batch]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "out:tensor([[0.1938, 0.8062],\n",
      "        [0.0479, 0.9521],\n",
      "        [0.9046, 0.0954],\n",
      "        [0.2391, 0.7609],\n",
      "        [0.1895, 0.8105],\n",
      "        [0.2189, 0.7811],\n",
      "        [0.3002, 0.6998],\n",
      "        [0.2079, 0.7921],\n",
      "        [0.2511, 0.7489],\n",
      "        [0.2122, 0.7878],\n",
      "        [0.1946, 0.8054],\n",
      "        [0.2294, 0.7706],\n",
      "        [0.2622, 0.7378],\n",
      "        [0.2380, 0.7620],\n",
      "        [0.2141, 0.7859],\n",
      "        [0.2157, 0.7843]], grad_fn=<SqueezeBackward1>)\n",
      "ground truth:tensor([[0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [1., 0.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.],\n",
      "        [0., 1.]], dtype=torch.float64)\n",
      "loss:0.4087907373905182\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/10:  18%|█████                        | 9/51 [00:23<01:48,  2.59s/batch]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[120], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m train(model, trainer, lr\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1e-3\u001b[39m, epochs\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m10\u001b[39m, device \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m)\n",
      "Cell \u001b[0;32mIn[117], line 19\u001b[0m, in \u001b[0;36mtrain\u001b[0;34m(model, train_iter, lr, epochs, device)\u001b[0m\n\u001b[1;32m     17\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mground truth:\u001b[39m\u001b[38;5;132;01m{\u001b[39;00my\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m     18\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mloss:\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mloss\u001b[38;5;241m.\u001b[39mitem()\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m---> 19\u001b[0m loss\u001b[38;5;241m.\u001b[39mbackward()\n\u001b[1;32m     20\u001b[0m opt\u001b[38;5;241m.\u001b[39mstep()\n\u001b[1;32m     21\u001b[0m total_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m loss\u001b[38;5;241m.\u001b[39mitem()\n",
      "File \u001b[0;32m~/miniconda3/envs/DL/lib/python3.11/site-packages/torch/_tensor.py:492\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m    482\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    483\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m    484\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m    485\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    490\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m    491\u001b[0m     )\n\u001b[0;32m--> 492\u001b[0m torch\u001b[38;5;241m.\u001b[39mautograd\u001b[38;5;241m.\u001b[39mbackward(\n\u001b[1;32m    493\u001b[0m     \u001b[38;5;28mself\u001b[39m, gradient, retain_graph, create_graph, inputs\u001b[38;5;241m=\u001b[39minputs\n\u001b[1;32m    494\u001b[0m )\n",
      "File \u001b[0;32m~/miniconda3/envs/DL/lib/python3.11/site-packages/torch/autograd/__init__.py:251\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m    246\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m    248\u001b[0m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[1;32m    249\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m    250\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 251\u001b[0m Variable\u001b[38;5;241m.\u001b[39m_execution_engine\u001b[38;5;241m.\u001b[39mrun_backward(  \u001b[38;5;66;03m# Calls into the C++ engine to run the backward pass\u001b[39;00m\n\u001b[1;32m    252\u001b[0m     tensors,\n\u001b[1;32m    253\u001b[0m     grad_tensors_,\n\u001b[1;32m    254\u001b[0m     retain_graph,\n\u001b[1;32m    255\u001b[0m     create_graph,\n\u001b[1;32m    256\u001b[0m     inputs,\n\u001b[1;32m    257\u001b[0m     allow_unreachable\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[1;32m    258\u001b[0m     accumulate_grad\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[1;32m    259\u001b[0m )\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "train(model, trainer, lr=1e-3, epochs=10, device = None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "id": "ba4f0bfb-4d4d-4b0f-b460-adfff2c57b30",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(model.state_dict(), './models/model_step.pt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "id": "05ee17d9-1b51-4d0e-bcdc-7ddfb1c5deb9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "output:tensor([[[0.5094, 0.4906]],\n",
      "\n",
      "        [[0.5131, 0.4869]],\n",
      "\n",
      "        [[0.5046, 0.4954]],\n",
      "\n",
      "        [[0.5247, 0.4753]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5115, 0.4885]],\n",
      "\n",
      "        [[0.5205, 0.4795]],\n",
      "\n",
      "        [[0.5256, 0.4744]],\n",
      "\n",
      "        [[0.5205, 0.4795]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5083, 0.4917]],\n",
      "\n",
      "        [[0.5156, 0.4844]],\n",
      "\n",
      "        [[0.5208, 0.4792]],\n",
      "\n",
      "        [[0.5140, 0.4860]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5147, 0.4853]],\n",
      "\n",
      "        [[0.4623, 0.5377]],\n",
      "\n",
      "        [[0.5181, 0.4819]],\n",
      "\n",
      "        [[0.5126, 0.4874]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5131, 0.4869]],\n",
      "\n",
      "        [[0.5156, 0.4844]],\n",
      "\n",
      "        [[0.5197, 0.4803]],\n",
      "\n",
      "        [[0.5106, 0.4894]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5193, 0.4807]],\n",
      "\n",
      "        [[0.5177, 0.4823]],\n",
      "\n",
      "        [[0.5080, 0.4920]],\n",
      "\n",
      "        [[0.5059, 0.4941]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5158, 0.4842]],\n",
      "\n",
      "        [[0.5156, 0.4844]],\n",
      "\n",
      "        [[0.5210, 0.4790]],\n",
      "\n",
      "        [[0.5188, 0.4812]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5136, 0.4864]],\n",
      "\n",
      "        [[0.5161, 0.4839]],\n",
      "\n",
      "        [[0.5081, 0.4919]],\n",
      "\n",
      "        [[0.5144, 0.4856]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5132, 0.4868]],\n",
      "\n",
      "        [[0.5100, 0.4900]],\n",
      "\n",
      "        [[0.5155, 0.4845]],\n",
      "\n",
      "        [[0.5145, 0.4855]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5103, 0.4897]],\n",
      "\n",
      "        [[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5127, 0.4873]],\n",
      "\n",
      "        [[0.5122, 0.4878]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5101, 0.4899]],\n",
      "\n",
      "        [[0.5034, 0.4966]],\n",
      "\n",
      "        [[0.5206, 0.4794]],\n",
      "\n",
      "        [[0.5153, 0.4847]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5079, 0.4921]],\n",
      "\n",
      "        [[0.5134, 0.4866]],\n",
      "\n",
      "        [[0.5166, 0.4834]],\n",
      "\n",
      "        [[0.5107, 0.4893]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5170, 0.4830]],\n",
      "\n",
      "        [[0.5100, 0.4900]],\n",
      "\n",
      "        [[0.4841, 0.5159]],\n",
      "\n",
      "        [[0.5102, 0.4898]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5151, 0.4849]],\n",
      "\n",
      "        [[0.5166, 0.4834]],\n",
      "\n",
      "        [[0.5146, 0.4854]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5181, 0.4819]],\n",
      "\n",
      "        [[0.5214, 0.4786]],\n",
      "\n",
      "        [[0.5111, 0.4889]],\n",
      "\n",
      "        [[0.5159, 0.4841]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5126, 0.4874]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5058, 0.4942]],\n",
      "\n",
      "        [[0.5048, 0.4952]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5215, 0.4785]],\n",
      "\n",
      "        [[0.5056, 0.4944]],\n",
      "\n",
      "        [[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5114, 0.4886]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5140, 0.4860]],\n",
      "\n",
      "        [[0.5108, 0.4892]],\n",
      "\n",
      "        [[0.5146, 0.4854]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5152, 0.4848]],\n",
      "\n",
      "        [[0.5129, 0.4871]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4922, 0.5078]],\n",
      "\n",
      "        [[0.5079, 0.4921]],\n",
      "\n",
      "        [[0.5147, 0.4853]],\n",
      "\n",
      "        [[0.5133, 0.4867]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5065, 0.4935]],\n",
      "\n",
      "        [[0.5178, 0.4822]],\n",
      "\n",
      "        [[0.5173, 0.4827]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5049, 0.4951]],\n",
      "\n",
      "        [[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5148, 0.4852]],\n",
      "\n",
      "        [[0.5204, 0.4796]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5218, 0.4782]],\n",
      "\n",
      "        [[0.5056, 0.4944]],\n",
      "\n",
      "        [[0.5148, 0.4852]],\n",
      "\n",
      "        [[0.5064, 0.4936]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5124, 0.4876]],\n",
      "\n",
      "        [[0.5155, 0.4845]],\n",
      "\n",
      "        [[0.5199, 0.4801]],\n",
      "\n",
      "        [[0.5163, 0.4837]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5167, 0.4833]],\n",
      "\n",
      "        [[0.5197, 0.4803]],\n",
      "\n",
      "        [[0.5086, 0.4914]],\n",
      "\n",
      "        [[0.5131, 0.4869]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5021, 0.4979]],\n",
      "\n",
      "        [[0.5232, 0.4768]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.4825, 0.5175]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5078, 0.4922]],\n",
      "\n",
      "        [[0.5165, 0.4835]],\n",
      "\n",
      "        [[0.5118, 0.4882]],\n",
      "\n",
      "        [[0.5133, 0.4867]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5134, 0.4866]],\n",
      "\n",
      "        [[0.5180, 0.4820]],\n",
      "\n",
      "        [[0.5128, 0.4872]],\n",
      "\n",
      "        [[0.5119, 0.4881]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5178, 0.4822]],\n",
      "\n",
      "        [[0.5089, 0.4911]],\n",
      "\n",
      "        [[0.5128, 0.4872]],\n",
      "\n",
      "        [[0.5134, 0.4866]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5101, 0.4899]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5090, 0.4910]],\n",
      "\n",
      "        [[0.5036, 0.4964]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5124, 0.4876]],\n",
      "\n",
      "        [[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5113, 0.4887]],\n",
      "\n",
      "        [[0.5185, 0.4815]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5050, 0.4950]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5121, 0.4879]],\n",
      "\n",
      "        [[0.5225, 0.4775]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5157, 0.4843]],\n",
      "\n",
      "        [[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5147, 0.4854]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5027, 0.4973]],\n",
      "\n",
      "        [[0.5099, 0.4901]],\n",
      "\n",
      "        [[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5120, 0.4880]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5146, 0.4854]],\n",
      "\n",
      "        [[0.5138, 0.4862]],\n",
      "\n",
      "        [[0.5164, 0.4836]],\n",
      "\n",
      "        [[0.5183, 0.4817]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5194, 0.4806]],\n",
      "\n",
      "        [[0.5111, 0.4889]],\n",
      "\n",
      "        [[0.5200, 0.4800]],\n",
      "\n",
      "        [[0.5105, 0.4895]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5157, 0.4843]],\n",
      "\n",
      "        [[0.5244, 0.4756]],\n",
      "\n",
      "        [[0.5192, 0.4808]],\n",
      "\n",
      "        [[0.5142, 0.4858]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5202, 0.4798]],\n",
      "\n",
      "        [[0.5113, 0.4887]],\n",
      "\n",
      "        [[0.5197, 0.4803]],\n",
      "\n",
      "        [[0.5165, 0.4835]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5081, 0.4919]],\n",
      "\n",
      "        [[0.5124, 0.4876]],\n",
      "\n",
      "        [[0.5179, 0.4821]],\n",
      "\n",
      "        [[0.5103, 0.4897]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5152, 0.4848]],\n",
      "\n",
      "        [[0.4988, 0.5012]],\n",
      "\n",
      "        [[0.4780, 0.5220]],\n",
      "\n",
      "        [[0.5203, 0.4797]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5205, 0.4795]],\n",
      "\n",
      "        [[0.5048, 0.4952]],\n",
      "\n",
      "        [[0.5147, 0.4853]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5248, 0.4752]],\n",
      "\n",
      "        [[0.5195, 0.4805]],\n",
      "\n",
      "        [[0.5208, 0.4792]],\n",
      "\n",
      "        [[0.5196, 0.4804]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5091, 0.4909]],\n",
      "\n",
      "        [[0.5139, 0.4861]],\n",
      "\n",
      "        [[0.5132, 0.4868]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5077, 0.4923]],\n",
      "\n",
      "        [[0.5105, 0.4895]],\n",
      "\n",
      "        [[0.5120, 0.4880]],\n",
      "\n",
      "        [[0.5200, 0.4800]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5103, 0.4897]],\n",
      "\n",
      "        [[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5089, 0.4911]],\n",
      "\n",
      "        [[0.5007, 0.4993]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5141, 0.4859]],\n",
      "\n",
      "        [[0.5143, 0.4857]],\n",
      "\n",
      "        [[0.5152, 0.4848]],\n",
      "\n",
      "        [[0.5172, 0.4828]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5170, 0.4830]],\n",
      "\n",
      "        [[0.5167, 0.4833]],\n",
      "\n",
      "        [[0.5151, 0.4849]],\n",
      "\n",
      "        [[0.5252, 0.4748]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5159, 0.4841]],\n",
      "\n",
      "        [[0.5125, 0.4875]],\n",
      "\n",
      "        [[0.5092, 0.4908]],\n",
      "\n",
      "        [[0.5157, 0.4843]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5046, 0.4954]],\n",
      "\n",
      "        [[0.5128, 0.4872]],\n",
      "\n",
      "        [[0.5122, 0.4878]],\n",
      "\n",
      "        [[0.5110, 0.4890]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5122, 0.4878]],\n",
      "\n",
      "        [[0.5084, 0.4916]],\n",
      "\n",
      "        [[0.5168, 0.4832]],\n",
      "\n",
      "        [[0.5147, 0.4853]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5096, 0.4904]],\n",
      "\n",
      "        [[0.5148, 0.4852]],\n",
      "\n",
      "        [[0.5106, 0.4894]],\n",
      "\n",
      "        [[0.5163, 0.4837]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5081, 0.4919]],\n",
      "\n",
      "        [[0.4741, 0.5259]],\n",
      "\n",
      "        [[0.5175, 0.4825]],\n",
      "\n",
      "        [[0.5104, 0.4896]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5114, 0.4886]],\n",
      "\n",
      "        [[0.5109, 0.4891]],\n",
      "\n",
      "        [[0.5106, 0.4894]],\n",
      "\n",
      "        [[0.5150, 0.4850]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5062, 0.4938]],\n",
      "\n",
      "        [[0.4774, 0.5226]],\n",
      "\n",
      "        [[0.4997, 0.5003]],\n",
      "\n",
      "        [[0.5164, 0.4836]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5079, 0.4921]],\n",
      "\n",
      "        [[0.5173, 0.4827]],\n",
      "\n",
      "        [[0.5194, 0.4806]],\n",
      "\n",
      "        [[0.5197, 0.4803]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5105, 0.4895]],\n",
      "\n",
      "        [[0.5097, 0.4903]],\n",
      "\n",
      "        [[0.5051, 0.4949]],\n",
      "\n",
      "        [[0.5120, 0.4880]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5166, 0.4834]],\n",
      "\n",
      "        [[0.5123, 0.4877]],\n",
      "\n",
      "        [[0.5187, 0.4813]],\n",
      "\n",
      "        [[0.5129, 0.4871]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5086, 0.4914]],\n",
      "\n",
      "        [[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5089, 0.4911]],\n",
      "\n",
      "        [[0.5090, 0.4910]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5163, 0.4837]],\n",
      "\n",
      "        [[0.5143, 0.4857]],\n",
      "\n",
      "        [[0.5282, 0.4718]],\n",
      "\n",
      "        [[0.5135, 0.4865]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5205, 0.4795]],\n",
      "\n",
      "        [[0.5243, 0.4757]],\n",
      "\n",
      "        [[0.4984, 0.5016]],\n",
      "\n",
      "        [[0.5203, 0.4797]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5095, 0.4905]],\n",
      "\n",
      "        [[0.5176, 0.4824]],\n",
      "\n",
      "        [[0.5131, 0.4869]],\n",
      "\n",
      "        [[0.5063, 0.4937]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5238, 0.4762]],\n",
      "\n",
      "        [[0.5005, 0.4995]],\n",
      "\n",
      "        [[0.5183, 0.4817]],\n",
      "\n",
      "        [[0.5119, 0.4881]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5141, 0.4859]],\n",
      "\n",
      "        [[0.5166, 0.4834]],\n",
      "\n",
      "        [[0.5145, 0.4855]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4668, 0.5332]],\n",
      "\n",
      "        [[0.5119, 0.4881]],\n",
      "\n",
      "        [[0.5136, 0.4864]],\n",
      "\n",
      "        [[0.5010, 0.4990]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5078, 0.4922]],\n",
      "\n",
      "        [[0.5199, 0.4801]],\n",
      "\n",
      "        [[0.5305, 0.4695]],\n",
      "\n",
      "        [[0.5156, 0.4844]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5110, 0.4890]],\n",
      "\n",
      "        [[0.5194, 0.4806]],\n",
      "\n",
      "        [[0.5142, 0.4858]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5119, 0.4881]],\n",
      "\n",
      "        [[0.5187, 0.4813]],\n",
      "\n",
      "        [[0.5218, 0.4782]],\n",
      "\n",
      "        [[0.5085, 0.4915]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5119, 0.4881]],\n",
      "\n",
      "        [[0.5153, 0.4847]],\n",
      "\n",
      "        [[0.5231, 0.4769]],\n",
      "\n",
      "        [[0.5125, 0.4875]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5098, 0.4902]],\n",
      "\n",
      "        [[0.5078, 0.4922]],\n",
      "\n",
      "        [[0.5056, 0.4944]],\n",
      "\n",
      "        [[0.5117, 0.4883]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5139, 0.4861]],\n",
      "\n",
      "        [[0.5217, 0.4783]],\n",
      "\n",
      "        [[0.5127, 0.4873]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5114, 0.4886]],\n",
      "\n",
      "        [[0.5129, 0.4871]],\n",
      "\n",
      "        [[0.4764, 0.5236]],\n",
      "\n",
      "        [[0.5140, 0.4860]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5130, 0.4870]],\n",
      "\n",
      "        [[0.5115, 0.4885]],\n",
      "\n",
      "        [[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5159, 0.4841]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5140, 0.4860]],\n",
      "\n",
      "        [[0.5193, 0.4807]],\n",
      "\n",
      "        [[0.5171, 0.4829]],\n",
      "\n",
      "        [[0.5116, 0.4884]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5139, 0.4861]],\n",
      "\n",
      "        [[0.5163, 0.4837]],\n",
      "\n",
      "        [[0.5069, 0.4931]],\n",
      "\n",
      "        [[0.5040, 0.4960]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5102, 0.4898]],\n",
      "\n",
      "        [[0.5191, 0.4809]],\n",
      "\n",
      "        [[0.5127, 0.4873]],\n",
      "\n",
      "        [[0.5001, 0.4999]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5173, 0.4827]],\n",
      "\n",
      "        [[0.5094, 0.4906]],\n",
      "\n",
      "        [[0.5114, 0.4886]],\n",
      "\n",
      "        [[0.5128, 0.4872]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5190, 0.4810]],\n",
      "\n",
      "        [[0.5211, 0.4789]],\n",
      "\n",
      "        [[0.5147, 0.4853]],\n",
      "\n",
      "        [[0.5158, 0.4842]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5242, 0.4758]],\n",
      "\n",
      "        [[0.5132, 0.4868]],\n",
      "\n",
      "        [[0.5142, 0.4858]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5106, 0.4894]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5257, 0.4743]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5131, 0.4869]],\n",
      "\n",
      "        [[0.5127, 0.4873]],\n",
      "\n",
      "        [[0.5081, 0.4919]],\n",
      "\n",
      "        [[0.5100, 0.4900]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5102, 0.4898]],\n",
      "\n",
      "        [[0.5093, 0.4907]],\n",
      "\n",
      "        [[0.5098, 0.4902]],\n",
      "\n",
      "        [[0.5200, 0.4800]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5161, 0.4839]],\n",
      "\n",
      "        [[0.5188, 0.4812]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5014, 0.4986]],\n",
      "\n",
      "        [[0.5132, 0.4868]],\n",
      "\n",
      "        [[0.5111, 0.4889]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5219, 0.4781]],\n",
      "\n",
      "        [[0.4993, 0.5007]],\n",
      "\n",
      "        [[0.5137, 0.4863]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4808, 0.5192]],\n",
      "\n",
      "        [[0.5100, 0.4900]],\n",
      "\n",
      "        [[0.5157, 0.4843]],\n",
      "\n",
      "        [[0.5179, 0.4821]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5055, 0.4945]],\n",
      "\n",
      "        [[0.5122, 0.4878]],\n",
      "\n",
      "        [[0.5016, 0.4984]],\n",
      "\n",
      "        [[0.5129, 0.4871]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5138, 0.4862]],\n",
      "\n",
      "        [[0.5058, 0.4942]],\n",
      "\n",
      "        [[0.5240, 0.4760]],\n",
      "\n",
      "        [[0.5101, 0.4899]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5194, 0.4806]],\n",
      "\n",
      "        [[0.5211, 0.4789]],\n",
      "\n",
      "        [[0.5092, 0.4908]],\n",
      "\n",
      "        [[0.5130, 0.4870]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5208, 0.4792]],\n",
      "\n",
      "        [[0.5140, 0.4860]],\n",
      "\n",
      "        [[0.5165, 0.4835]],\n",
      "\n",
      "        [[0.5221, 0.4779]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5154, 0.4846]],\n",
      "\n",
      "        [[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5205, 0.4795]],\n",
      "\n",
      "        [[0.5193, 0.4807]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4474, 0.5526]],\n",
      "\n",
      "        [[0.5073, 0.4927]],\n",
      "\n",
      "        [[0.5103, 0.4897]],\n",
      "\n",
      "        [[0.5146, 0.4854]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5198, 0.4802]],\n",
      "\n",
      "        [[0.5181, 0.4819]],\n",
      "\n",
      "        [[0.5079, 0.4921]],\n",
      "\n",
      "        [[0.5199, 0.4801]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4991, 0.5009]],\n",
      "\n",
      "        [[0.5053, 0.4947]],\n",
      "\n",
      "        [[0.5153, 0.4847]],\n",
      "\n",
      "        [[0.5202, 0.4798]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5075, 0.4925]],\n",
      "\n",
      "        [[0.5199, 0.4801]],\n",
      "\n",
      "        [[0.5129, 0.4871]],\n",
      "\n",
      "        [[0.5151, 0.4849]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5221, 0.4779]],\n",
      "\n",
      "        [[0.5132, 0.4868]],\n",
      "\n",
      "        [[0.5081, 0.4919]],\n",
      "\n",
      "        [[0.5123, 0.4877]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5064, 0.4936]],\n",
      "\n",
      "        [[0.5114, 0.4886]],\n",
      "\n",
      "        [[0.5170, 0.4830]],\n",
      "\n",
      "        [[0.5118, 0.4882]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5111, 0.4889]],\n",
      "\n",
      "        [[0.5132, 0.4868]],\n",
      "\n",
      "        [[0.5109, 0.4891]],\n",
      "\n",
      "        [[0.4922, 0.5078]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5177, 0.4823]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.4890, 0.5110]],\n",
      "\n",
      "        [[0.5107, 0.4893]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5049, 0.4951]],\n",
      "\n",
      "        [[0.5199, 0.4801]],\n",
      "\n",
      "        [[0.4880, 0.5120]],\n",
      "\n",
      "        [[0.5117, 0.4883]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5130, 0.4870]],\n",
      "\n",
      "        [[0.5203, 0.4797]],\n",
      "\n",
      "        [[0.4585, 0.5415]],\n",
      "\n",
      "        [[0.4962, 0.5038]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5204, 0.4796]],\n",
      "\n",
      "        [[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5029, 0.4971]],\n",
      "\n",
      "        [[0.5131, 0.4869]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5230, 0.4770]],\n",
      "\n",
      "        [[0.5246, 0.4754]],\n",
      "\n",
      "        [[0.5159, 0.4841]],\n",
      "\n",
      "        [[0.5103, 0.4897]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5079, 0.4921]],\n",
      "\n",
      "        [[0.5011, 0.4989]],\n",
      "\n",
      "        [[0.5078, 0.4922]],\n",
      "\n",
      "        [[0.5135, 0.4865]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5122, 0.4878]],\n",
      "\n",
      "        [[0.5161, 0.4839]],\n",
      "\n",
      "        [[0.5172, 0.4828]],\n",
      "\n",
      "        [[0.5098, 0.4902]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5094, 0.4906]],\n",
      "\n",
      "        [[0.5216, 0.4784]],\n",
      "\n",
      "        [[0.5101, 0.4899]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5115, 0.4885]],\n",
      "\n",
      "        [[0.5214, 0.4786]],\n",
      "\n",
      "        [[0.5128, 0.4872]],\n",
      "\n",
      "        [[0.4990, 0.5010]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5147, 0.4853]],\n",
      "\n",
      "        [[0.5188, 0.4812]],\n",
      "\n",
      "        [[0.5151, 0.4849]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5107, 0.4893]],\n",
      "\n",
      "        [[0.5116, 0.4884]],\n",
      "\n",
      "        [[0.5156, 0.4844]],\n",
      "\n",
      "        [[0.5097, 0.4903]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5110, 0.4890]],\n",
      "\n",
      "        [[0.5089, 0.4911]],\n",
      "\n",
      "        [[0.5032, 0.4968]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5110, 0.4890]],\n",
      "\n",
      "        [[0.5054, 0.4946]],\n",
      "\n",
      "        [[0.5079, 0.4921]],\n",
      "\n",
      "        [[0.5065, 0.4935]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5204, 0.4796]],\n",
      "\n",
      "        [[0.5245, 0.4755]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5183, 0.4817]],\n",
      "\n",
      "        [[0.5188, 0.4812]],\n",
      "\n",
      "        [[0.5160, 0.4840]],\n",
      "\n",
      "        [[0.5182, 0.4818]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5150, 0.4850]],\n",
      "\n",
      "        [[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5172, 0.4828]],\n",
      "\n",
      "        [[0.5015, 0.4985]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4825, 0.5175]],\n",
      "\n",
      "        [[0.5155, 0.4845]],\n",
      "\n",
      "        [[0.4801, 0.5199]],\n",
      "\n",
      "        [[0.5031, 0.4969]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5077, 0.4923]],\n",
      "\n",
      "        [[0.5242, 0.4758]],\n",
      "\n",
      "        [[0.4925, 0.5075]],\n",
      "\n",
      "        [[0.5216, 0.4784]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5248, 0.4752]],\n",
      "\n",
      "        [[0.5096, 0.4904]],\n",
      "\n",
      "        [[0.5130, 0.4870]],\n",
      "\n",
      "        [[0.5175, 0.4825]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5136, 0.4864]],\n",
      "\n",
      "        [[0.5132, 0.4868]],\n",
      "\n",
      "        [[0.5148, 0.4852]],\n",
      "\n",
      "        [[0.5188, 0.4812]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5151, 0.4849]],\n",
      "\n",
      "        [[0.5232, 0.4768]],\n",
      "\n",
      "        [[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5203, 0.4797]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5104, 0.4896]],\n",
      "\n",
      "        [[0.5197, 0.4803]],\n",
      "\n",
      "        [[0.5082, 0.4918]],\n",
      "\n",
      "        [[0.4617, 0.5383]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5138, 0.4862]],\n",
      "\n",
      "        [[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5136, 0.4864]],\n",
      "\n",
      "        [[0.5138, 0.4862]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5231, 0.4769]],\n",
      "\n",
      "        [[0.4789, 0.5211]],\n",
      "\n",
      "        [[0.5040, 0.4960]],\n",
      "\n",
      "        [[0.5107, 0.4893]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5150, 0.4850]],\n",
      "\n",
      "        [[0.5205, 0.4795]],\n",
      "\n",
      "        [[0.5088, 0.4912]],\n",
      "\n",
      "        [[0.5147, 0.4853]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5247, 0.4753]],\n",
      "\n",
      "        [[0.5158, 0.4842]],\n",
      "\n",
      "        [[0.5208, 0.4792]],\n",
      "\n",
      "        [[0.5176, 0.4824]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5150, 0.4850]],\n",
      "\n",
      "        [[0.5053, 0.4947]],\n",
      "\n",
      "        [[0.5188, 0.4812]],\n",
      "\n",
      "        [[0.5245, 0.4755]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5099, 0.4901]],\n",
      "\n",
      "        [[0.5203, 0.4797]],\n",
      "\n",
      "        [[0.5160, 0.4840]],\n",
      "\n",
      "        [[0.5213, 0.4787]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5152, 0.4848]],\n",
      "\n",
      "        [[0.5023, 0.4977]],\n",
      "\n",
      "        [[0.5116, 0.4884]],\n",
      "\n",
      "        [[0.5171, 0.4829]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5131, 0.4869]],\n",
      "\n",
      "        [[0.5179, 0.4821]],\n",
      "\n",
      "        [[0.5088, 0.4912]],\n",
      "\n",
      "        [[0.4998, 0.5002]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5048, 0.4952]],\n",
      "\n",
      "        [[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5052, 0.4948]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5155, 0.4845]],\n",
      "\n",
      "        [[0.5082, 0.4918]],\n",
      "\n",
      "        [[0.5146, 0.4854]],\n",
      "\n",
      "        [[0.5133, 0.4867]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5122, 0.4878]],\n",
      "\n",
      "        [[0.5116, 0.4884]],\n",
      "\n",
      "        [[0.5126, 0.4874]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5225, 0.4775]],\n",
      "\n",
      "        [[0.5173, 0.4827]],\n",
      "\n",
      "        [[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5104, 0.4896]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5128, 0.4872]],\n",
      "\n",
      "        [[0.5057, 0.4943]],\n",
      "\n",
      "        [[0.5188, 0.4812]],\n",
      "\n",
      "        [[0.5119, 0.4881]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5015, 0.4985]],\n",
      "\n",
      "        [[0.5232, 0.4768]],\n",
      "\n",
      "        [[0.5182, 0.4818]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5245, 0.4755]],\n",
      "\n",
      "        [[0.5243, 0.4757]],\n",
      "\n",
      "        [[0.5142, 0.4858]],\n",
      "\n",
      "        [[0.5069, 0.4931]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5245, 0.4755]],\n",
      "\n",
      "        [[0.5226, 0.4774]],\n",
      "\n",
      "        [[0.5173, 0.4827]],\n",
      "\n",
      "        [[0.5140, 0.4860]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5138, 0.4862]],\n",
      "\n",
      "        [[0.5112, 0.4888]],\n",
      "\n",
      "        [[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5215, 0.4785]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5073, 0.4927]],\n",
      "\n",
      "        [[0.5213, 0.4787]],\n",
      "\n",
      "        [[0.5129, 0.4871]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5092, 0.4908]],\n",
      "\n",
      "        [[0.5140, 0.4860]],\n",
      "\n",
      "        [[0.5052, 0.4948]],\n",
      "\n",
      "        [[0.5083, 0.4917]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5173, 0.4827]],\n",
      "\n",
      "        [[0.5175, 0.4825]],\n",
      "\n",
      "        [[0.5145, 0.4855]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5192, 0.4808]],\n",
      "\n",
      "        [[0.5174, 0.4826]],\n",
      "\n",
      "        [[0.5125, 0.4875]],\n",
      "\n",
      "        [[0.5104, 0.4896]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5183, 0.4817]],\n",
      "\n",
      "        [[0.5143, 0.4857]],\n",
      "\n",
      "        [[0.5193, 0.4807]],\n",
      "\n",
      "        [[0.5136, 0.4864]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5081, 0.4919]],\n",
      "\n",
      "        [[0.5283, 0.4717]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5168, 0.4832]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5237, 0.4763]],\n",
      "\n",
      "        [[0.5115, 0.4885]],\n",
      "\n",
      "        [[0.5218, 0.4782]],\n",
      "\n",
      "        [[0.5129, 0.4871]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5169, 0.4831]],\n",
      "\n",
      "        [[0.5216, 0.4784]],\n",
      "\n",
      "        [[0.5121, 0.4879]],\n",
      "\n",
      "        [[0.5240, 0.4760]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4986, 0.5014]],\n",
      "\n",
      "        [[0.5143, 0.4857]],\n",
      "\n",
      "        [[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5014, 0.4986]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5127, 0.4873]],\n",
      "\n",
      "        [[0.5129, 0.4871]],\n",
      "\n",
      "        [[0.5178, 0.4822]],\n",
      "\n",
      "        [[0.5176, 0.4824]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5079, 0.4921]],\n",
      "\n",
      "        [[0.4840, 0.5160]],\n",
      "\n",
      "        [[0.4932, 0.5068]],\n",
      "\n",
      "        [[0.5175, 0.4825]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5090, 0.4910]],\n",
      "\n",
      "        [[0.5189, 0.4811]],\n",
      "\n",
      "        [[0.5202, 0.4798]],\n",
      "\n",
      "        [[0.5182, 0.4818]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4541, 0.5459]],\n",
      "\n",
      "        [[0.5099, 0.4901]],\n",
      "\n",
      "        [[0.5125, 0.4875]],\n",
      "\n",
      "        [[0.5136, 0.4864]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5177, 0.4823]],\n",
      "\n",
      "        [[0.5112, 0.4888]],\n",
      "\n",
      "        [[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.4974, 0.5026]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5124, 0.4876]],\n",
      "\n",
      "        [[0.5128, 0.4872]],\n",
      "\n",
      "        [[0.5167, 0.4833]],\n",
      "\n",
      "        [[0.5104, 0.4896]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5224, 0.4776]],\n",
      "\n",
      "        [[0.5085, 0.4915]],\n",
      "\n",
      "        [[0.5139, 0.4861]],\n",
      "\n",
      "        [[0.5138, 0.4862]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5092, 0.4908]],\n",
      "\n",
      "        [[0.5129, 0.4871]],\n",
      "\n",
      "        [[0.5173, 0.4827]],\n",
      "\n",
      "        [[0.5184, 0.4816]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5115, 0.4885]],\n",
      "\n",
      "        [[0.5165, 0.4835]],\n",
      "\n",
      "        [[0.5043, 0.4957]],\n",
      "\n",
      "        [[0.5133, 0.4867]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5201, 0.4799]],\n",
      "\n",
      "        [[0.5131, 0.4869]],\n",
      "\n",
      "        [[0.5142, 0.4858]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5078, 0.4922]],\n",
      "\n",
      "        [[0.5161, 0.4839]],\n",
      "\n",
      "        [[0.5056, 0.4944]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5198, 0.4802]],\n",
      "\n",
      "        [[0.5172, 0.4828]],\n",
      "\n",
      "        [[0.5130, 0.4870]],\n",
      "\n",
      "        [[0.5218, 0.4782]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5032, 0.4968]],\n",
      "\n",
      "        [[0.5148, 0.4852]],\n",
      "\n",
      "        [[0.5055, 0.4945]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5124, 0.4876]],\n",
      "\n",
      "        [[0.5165, 0.4835]],\n",
      "\n",
      "        [[0.5180, 0.4820]],\n",
      "\n",
      "        [[0.5126, 0.4874]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4938, 0.5062]],\n",
      "\n",
      "        [[0.5228, 0.4772]],\n",
      "\n",
      "        [[0.5172, 0.4828]],\n",
      "\n",
      "        [[0.5152, 0.4848]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5157, 0.4843]],\n",
      "\n",
      "        [[0.5137, 0.4863]],\n",
      "\n",
      "        [[0.5078, 0.4922]],\n",
      "\n",
      "        [[0.5121, 0.4879]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5088, 0.4912]],\n",
      "\n",
      "        [[0.5112, 0.4888]],\n",
      "\n",
      "        [[0.5175, 0.4825]],\n",
      "\n",
      "        [[0.5077, 0.4923]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5180, 0.4820]],\n",
      "\n",
      "        [[0.5145, 0.4855]],\n",
      "\n",
      "        [[0.5089, 0.4911]],\n",
      "\n",
      "        [[0.5192, 0.4808]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5203, 0.4797]],\n",
      "\n",
      "        [[0.5067, 0.4933]],\n",
      "\n",
      "        [[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5135, 0.4865]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5134, 0.4866]],\n",
      "\n",
      "        [[0.5108, 0.4892]],\n",
      "\n",
      "        [[0.5138, 0.4862]],\n",
      "\n",
      "        [[0.5070, 0.4930]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5166, 0.4834]],\n",
      "\n",
      "        [[0.5242, 0.4758]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5156, 0.4844]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5150, 0.4850]],\n",
      "\n",
      "        [[0.4992, 0.5008]],\n",
      "\n",
      "        [[0.5083, 0.4917]],\n",
      "\n",
      "        [[0.5250, 0.4750]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5154, 0.4846]],\n",
      "\n",
      "        [[0.5156, 0.4844]],\n",
      "\n",
      "        [[0.5045, 0.4955]],\n",
      "\n",
      "        [[0.5150, 0.4850]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5146, 0.4854]],\n",
      "\n",
      "        [[0.5196, 0.4804]],\n",
      "\n",
      "        [[0.5188, 0.4812]],\n",
      "\n",
      "        [[0.5127, 0.4873]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5014, 0.4986]],\n",
      "\n",
      "        [[0.4932, 0.5068]],\n",
      "\n",
      "        [[0.4757, 0.5243]],\n",
      "\n",
      "        [[0.5179, 0.4821]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5139, 0.4861]],\n",
      "\n",
      "        [[0.5101, 0.4899]],\n",
      "\n",
      "        [[0.5200, 0.4800]],\n",
      "\n",
      "        [[0.5119, 0.4881]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5247, 0.4753]],\n",
      "\n",
      "        [[0.5162, 0.4838]],\n",
      "\n",
      "        [[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5150, 0.4850]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5145, 0.4855]],\n",
      "\n",
      "        [[0.5105, 0.4895]],\n",
      "\n",
      "        [[0.5095, 0.4905]],\n",
      "\n",
      "        [[0.5080, 0.4920]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5132, 0.4868]],\n",
      "\n",
      "        [[0.5136, 0.4864]],\n",
      "\n",
      "        [[0.5113, 0.4887]],\n",
      "\n",
      "        [[0.5108, 0.4892]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4915, 0.5085]],\n",
      "\n",
      "        [[0.5114, 0.4886]],\n",
      "\n",
      "        [[0.5234, 0.4766]],\n",
      "\n",
      "        [[0.5073, 0.4927]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5098, 0.4902]],\n",
      "\n",
      "        [[0.5136, 0.4864]],\n",
      "\n",
      "        [[0.5239, 0.4761]],\n",
      "\n",
      "        [[0.5138, 0.4862]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4918, 0.5082]],\n",
      "\n",
      "        [[0.5233, 0.4767]],\n",
      "\n",
      "        [[0.4703, 0.5297]],\n",
      "\n",
      "        [[0.5102, 0.4898]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5044, 0.4956]],\n",
      "\n",
      "        [[0.5127, 0.4873]],\n",
      "\n",
      "        [[0.5194, 0.4806]],\n",
      "\n",
      "        [[0.5218, 0.4782]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5106, 0.4894]],\n",
      "\n",
      "        [[0.5136, 0.4864]],\n",
      "\n",
      "        [[0.5115, 0.4885]],\n",
      "\n",
      "        [[0.4921, 0.5079]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5106, 0.4894]],\n",
      "\n",
      "        [[0.5153, 0.4847]],\n",
      "\n",
      "        [[0.5059, 0.4941]],\n",
      "\n",
      "        [[0.5109, 0.4891]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5164, 0.4836]],\n",
      "\n",
      "        [[0.5122, 0.4878]],\n",
      "\n",
      "        [[0.5138, 0.4862]],\n",
      "\n",
      "        [[0.5237, 0.4763]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5122, 0.4878]],\n",
      "\n",
      "        [[0.5075, 0.4925]],\n",
      "\n",
      "        [[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5221, 0.4779]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5125, 0.4875]],\n",
      "\n",
      "        [[0.4916, 0.5084]],\n",
      "\n",
      "        [[0.5050, 0.4950]],\n",
      "\n",
      "        [[0.5144, 0.4856]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5178, 0.4822]],\n",
      "\n",
      "        [[0.5126, 0.4874]],\n",
      "\n",
      "        [[0.5134, 0.4866]],\n",
      "\n",
      "        [[0.5097, 0.4903]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5192, 0.4808]],\n",
      "\n",
      "        [[0.5225, 0.4775]],\n",
      "\n",
      "        [[0.5006, 0.4994]],\n",
      "\n",
      "        [[0.5150, 0.4850]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4995, 0.5005]],\n",
      "\n",
      "        [[0.5154, 0.4846]],\n",
      "\n",
      "        [[0.5269, 0.4731]],\n",
      "\n",
      "        [[0.5192, 0.4808]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5090, 0.4910]],\n",
      "\n",
      "        [[0.5108, 0.4892]],\n",
      "\n",
      "        [[0.5106, 0.4894]],\n",
      "\n",
      "        [[0.5079, 0.4921]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5144, 0.4856]],\n",
      "\n",
      "        [[0.5041, 0.4959]],\n",
      "\n",
      "        [[0.5159, 0.4841]],\n",
      "\n",
      "        [[0.5136, 0.4864]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5158, 0.4842]],\n",
      "\n",
      "        [[0.5145, 0.4855]],\n",
      "\n",
      "        [[0.5126, 0.4874]],\n",
      "\n",
      "        [[0.5155, 0.4845]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5208, 0.4792]],\n",
      "\n",
      "        [[0.5250, 0.4750]],\n",
      "\n",
      "        [[0.5154, 0.4846]],\n",
      "\n",
      "        [[0.5113, 0.4887]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5117, 0.4883]],\n",
      "\n",
      "        [[0.5129, 0.4871]],\n",
      "\n",
      "        [[0.5133, 0.4867]],\n",
      "\n",
      "        [[0.5228, 0.4772]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5149, 0.4851]],\n",
      "\n",
      "        [[0.5074, 0.4926]],\n",
      "\n",
      "        [[0.5201, 0.4799]],\n",
      "\n",
      "        [[0.5222, 0.4778]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4869, 0.5131]],\n",
      "\n",
      "        [[0.5195, 0.4805]],\n",
      "\n",
      "        [[0.5114, 0.4886]],\n",
      "\n",
      "        [[0.5104, 0.4896]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5160, 0.4840]],\n",
      "\n",
      "        [[0.4969, 0.5031]],\n",
      "\n",
      "        [[0.5211, 0.4789]],\n",
      "\n",
      "        [[0.5192, 0.4808]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5084, 0.4916]],\n",
      "\n",
      "        [[0.5113, 0.4887]],\n",
      "\n",
      "        [[0.5107, 0.4893]],\n",
      "\n",
      "        [[0.5124, 0.4876]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5118, 0.4882]],\n",
      "\n",
      "        [[0.5159, 0.4841]],\n",
      "\n",
      "        [[0.5145, 0.4855]],\n",
      "\n",
      "        [[0.5146, 0.4854]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5202, 0.4798]],\n",
      "\n",
      "        [[0.5074, 0.4926]],\n",
      "\n",
      "        [[0.5112, 0.4888]],\n",
      "\n",
      "        [[0.5127, 0.4873]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.4966, 0.5034]],\n",
      "\n",
      "        [[0.5206, 0.4794]],\n",
      "\n",
      "        [[0.5183, 0.4817]],\n",
      "\n",
      "        [[0.5100, 0.4900]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5135, 0.4865]],\n",
      "\n",
      "        [[0.5245, 0.4755]],\n",
      "\n",
      "        [[0.5166, 0.4834]],\n",
      "\n",
      "        [[0.5088, 0.4911]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5203, 0.4797]],\n",
      "\n",
      "        [[0.5130, 0.4870]],\n",
      "\n",
      "        [[0.5112, 0.4888]],\n",
      "\n",
      "        [[0.5228, 0.4772]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]],\n",
      "\n",
      "        [[1., 0.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n",
      "output:tensor([[[0.5100, 0.4900]],\n",
      "\n",
      "        [[0.5250, 0.4750]]], grad_fn=<SoftmaxBackward0>)\n",
      "GT:tensor([[[0., 1.]],\n",
      "\n",
      "        [[0., 1.]]], dtype=torch.float64)\n"
     ]
    }
   ],
   "source": [
    "# Evaluation pass: print model predictions next to ground truth for each batch.\n",
    "model.eval()  # switch norm/dropout layers to inference behavior\n",
    "# no_grad: outputs previously carried grad_fn=<SoftmaxBackward0>, i.e. autograd\n",
    "# graphs were being built and kept alive during pure inference. Disabling\n",
    "# gradient tracking avoids that memory/compute overhead; results are identical.\n",
    "with torch.no_grad():\n",
    "    for x, y in trainer:\n",
    "        output = model(x)\n",
    "        print(f'output:{output}')\n",
    "        print(f'GT:{y}')\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
