{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "efAL33Rnx_R3",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "efAL33Rnx_R3",
        "outputId": "0129c7eb-ea6a-4ad1-8f01-d86f14a14237"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Mounted at /content/drive\n"
          ]
        }
      ],
      "source": [
        "from google.colab import drive\n",
        "drive.mount('/content/drive')"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "_Ua3QqDgWba9",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "_Ua3QqDgWba9",
        "outputId": "85c057fa-8670-49d0-9a2d-1bdfe737fa59"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "/content/drive/MyDrive/Colab Notebooks/data/flask_deploy_model\n"
          ]
        }
      ],
      "source": [
        "cd /content/drive/MyDrive/Colab Notebooks/data/flask_deploy_model"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "0gzJ7MzPXhy1",
      "metadata": {
        "id": "0gzJ7MzPXhy1"
      },
      "source": [
        "**Creating csv file containing name of videos in dataset with their subfolder to read it later**"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "e3bf4538",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "e3bf4538",
        "outputId": "2f26465a-3d53-4a36-ccd6-d7ae08d6ad8f"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "['abdul', 'arham', 'arqam', 'ghazanfar']\n",
            "['abdul_s1.mp4' 'abdul_s2.mp4' 'abdul_s4.mp4' 'abdul_s3.mp4'\n",
            " 'abdul_s6.mp4' 'abdul_s5.mp4']\n",
            "6\n",
            "['abdullah', 'S1']\n"
          ]
        }
      ],
      "source": [
        "import os\n",
        "import numpy as np\n",
        "import csv \n",
        "\n",
        "users = os.listdir(\"/content/drive/MyDrive/videos_ppg/videos\")\n",
        "users = list(filter(lambda x: os.path.isdir(os.path.join(\"/content/drive/MyDrive/videos_ppg/videos\", x)), users))\n",
        "print(users)\n",
        "list_users = []\n",
        "fields=['label','video']\n",
        "filename = \"data_records.csv\"\n",
        "with open(filename, 'w') as csvfile: \n",
        "    # creating a csv writer object \n",
        "    csvwriter = csv.writer(csvfile) \n",
        "    csvwriter.writerow(fields) \n",
        "\n",
        "for i, user in enumerate(sorted(users)):\n",
        "    user_fold = os.path.join(\"/content/drive/MyDrive/videos_ppg/videos/\", user)\n",
        "    #print(user_fold)\n",
        "    user_files = os.listdir(user_fold)\n",
        "    list_users.append(user_files)\n",
        "    \n",
        "arr_users = np.array(list_users)\n",
        "\n",
        "for i in range (4):\n",
        "    for j in range(len(arr_users[i])):\n",
        "        split = arr_users[i][j].split('_')\n",
        "        name = split[0]\n",
        "        video = arr_users[i][j]\n",
        "        row = [name,video]\n",
        "        with open(filename, 'a') as csvfile: \n",
        "            # creating a csv writer object \n",
        "            csvwriter = csv.writer(csvfile) \n",
        "            csvwriter.writerow(row) \n",
        "\n",
        "print(arr_users[0])\n",
        "print(len(arr_users[0]))\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "\n",
        "sent = 'abdullah_S1'\n",
        "stri = sent.split('_')\n",
        "print(stri)\n",
        "str1 = stri[0]"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "fb27d90c",
      "metadata": {
        "id": "fb27d90c"
      },
      "outputs": [],
      "source": [
        "from __future__ import print_function, division\n",
        "import os\n",
        "import torch\n",
        "import pandas as pd\n",
        "#from skimage import io, transform\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "from torch.utils.data import Dataset, DataLoader\n",
        "from torchvision import transforms, utils\n",
        "import cv2\n",
        "# Ignore warnings\n",
        "import warnings\n",
        "from sklearn.metrics import accuracy_score as acc\n",
        "\n",
        "from sklearn.metrics import confusion_matrix\n",
        "\n",
        "from sklearn.metrics import matthews_corrcoef as mcor\n",
        "\n",
        "from sklearn.metrics import precision_score as precision\n",
        "\n",
        "from sklearn.metrics import recall_score as recall\n",
        "\n",
        "from torch.utils.data import DataLoader\n",
        "\n",
        "from torch.utils.tensorboard import SummaryWriter\n",
        "\n",
        "warnings.filterwarnings(\"ignore\")\n",
        "\n",
        "plt.ion()   # interactive mode"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "4xawcg6w5p86",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "4xawcg6w5p86",
        "outputId": "b29d0688-9c95-4602-d6b8-c38428b1dc56"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "0.14.0+cu116\n"
          ]
        }
      ],
      "source": [
        "import torchvision\n",
        "print(torchvision.__version__)"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "9BRRnUgoQ4AH",
      "metadata": {
        "id": "9BRRnUgoQ4AH"
      },
      "source": [
        "**Creating Custom Dataset**\n",
        "\n",
        " ---\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "3cdff9a8",
      "metadata": {
        "id": "3cdff9a8"
      },
      "outputs": [],
      "source": [
        "class PpgFramesDataset(Dataset):\n",
        "    \"\"\"Face Landmarks dataset.\"\"\"\n",
        "\n",
        "    def __init__(self, csv_file, root_dir, transform=None):\n",
        "        \"\"\"\n",
        "        Args:\n",
        "            csv_file (string): Path to the csv file with annotations.\n",
        "            root_dir (string): Directory with all the images.\n",
        "            transform (callable, optional): Optional transform to be applied\n",
        "                on a sample.\n",
        "        \"\"\"\n",
        "        \n",
        "       \n",
        "        self.ppg_frames = pd.read_csv(csv_file)\n",
        "        print(self.ppg_frames)\n",
        "        self.root_dir = root_dir\n",
        "        video_name = os.path.join(\"/content/drive/MyDrive/videos_ppg/videos/\"+self.ppg_frames.iloc[0, 0],self.ppg_frames.iloc[0, 1])\n",
        "        print(video_name)\n",
        "        self.transform = transform\n",
        "\n",
        "    def __len__(self):\n",
        "        return len(self.ppg_frames)\n",
        "\n",
        "    def __getitem__(self, idx):\n",
        "        \n",
        "        if torch.is_tensor(idx):\n",
        "            idx = idx.tolist()\n",
        "\n",
        "        video_name = os.path.join(\"/content/drive/MyDrive/videos_ppg/videos/\"+self.ppg_frames.iloc[idx, 0],self.ppg_frames.iloc[idx, 1])\n",
        "        #image = io.imread(img_name)\n",
        "        video = cv2.VideoCapture(video_name)\n",
        "        \n",
        "        i=0\n",
        "        data=None\n",
        "        while(video.isOpened()):\n",
        "            ret, frame = video.read()\n",
        "            if ret == False:\n",
        "                break\n",
        "            #print(frame.shape)\n",
        "            frame=cv2.resize(frame, (256,224))\n",
        "            #input_tensor = preprocess(frame)\n",
        "            #print(frame.shape)\n",
        "            if(i==0):\n",
        "               \n",
        "                data=frame\n",
        "            else:\n",
        "                data=np.concatenate((data,frame),axis=2)\n",
        "            i+=1\n",
        "        video.release()\n",
        "        cv2.destroyAllWindows()\n",
        "        ppgFrames = self.ppg_frames.iloc[idx, 1:]\n",
        "        print(ppgFrames)\n",
        "        ppgFrames = np.array([ppgFrames])\n",
        "        #ppgFrames = ppgFrames.astype('float').reshape(-1, 2)\n",
        "        sample = {'image': data, 'labels': ppgFrames}\n",
        "\n",
        "        if self.transform:\n",
        "            sample = self.transform(sample)\n",
        "\n",
        "        return sample\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "lBBjwadVQHJ_",
      "metadata": {
        "id": "lBBjwadVQHJ_"
      },
      "source": [
        "**Function to calculate accuracy of model**"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "XPtXDZLK1Ky8",
      "metadata": {
        "id": "XPtXDZLK1Ky8"
      },
      "outputs": [],
      "source": [
        "def accuracy(outputs, trues):\n",
        "    \n",
        "    ### Converting preds to 0 or 1\n",
        "    outputs = [1 if outputs[i] >= 0.5 else 0 for i in range(len(outputs))]\n",
        "    print(outputs)\n",
        "    \n",
        "    ### Calculating accuracy by comparing predictions with true labels\n",
        "    acc = [1 if outputs[i] == trues[i] else 0 for i in range(len(outputs))]\n",
        "    \n",
        "    ### Summing over all correct predictions\n",
        "    acc = np.sum(acc) / len(outputs)\n",
        "    \n",
        "    return (acc * 100)"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "svf_c4cmXSpA",
      "metadata": {
        "id": "svf_c4cmXSpA"
      },
      "source": [
        "**Resnet Pytorch Model**"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "UVdz2OlfHu6r",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "UVdz2OlfHu6r",
        "outputId": "b5eb732c-6822-4d99-bb1c-7cd637224f29"
      },
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "Downloading: \"https://github.com/pytorch/vision/zipball/v0.10.0\" to /root/.cache/torch/hub/v0.10.0.zip\n"
          ]
        },
        {
          "data": {
            "text/plain": [
              "ResNet(\n",
              "  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
              "  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "  (relu): ReLU(inplace=True)\n",
              "  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
              "  (layer1): Sequential(\n",
              "    (0): BasicBlock(\n",
              "      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "    )\n",
              "    (1): BasicBlock(\n",
              "      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "    )\n",
              "  )\n",
              "  (layer2): Sequential(\n",
              "    (0): BasicBlock(\n",
              "      (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (downsample): Sequential(\n",
              "        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
              "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      )\n",
              "    )\n",
              "    (1): BasicBlock(\n",
              "      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "    )\n",
              "  )\n",
              "  (layer3): Sequential(\n",
              "    (0): BasicBlock(\n",
              "      (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (downsample): Sequential(\n",
              "        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
              "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      )\n",
              "    )\n",
              "    (1): BasicBlock(\n",
              "      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "    )\n",
              "  )\n",
              "  (layer4): Sequential(\n",
              "    (0): BasicBlock(\n",
              "      (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (downsample): Sequential(\n",
              "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
              "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      )\n",
              "    )\n",
              "    (1): BasicBlock(\n",
              "      (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "      (relu): ReLU(inplace=True)\n",
              "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
              "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
              "    )\n",
              "  )\n",
              "  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n",
              "  (fc): Linear(in_features=512, out_features=1000, bias=True)\n",
              ")"
            ]
          },
          "execution_count": 9,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "source": [
        "import torch\n",
        "import numpy as np\n",
        "import torch.nn as nn\n",
        "import pickle\n",
        "model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=False)\n",
        "\n",
        "from torchvision import transforms\n",
        "model.eval()\n",
        "#pickle.dump(model,open('model_pkl','wb'))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "150a0634",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 563
        },
        "id": "150a0634",
        "outputId": "7fa03026-fea7-4402-b124-8e8ddc2246ff",
        "scrolled": true
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "['abdul', 'abdul', 'abdul', 'abdul', 'abdul', 'abdul', 'arham', 'arham', 'arham', 'arham', 'arham', 'arham', 'arqam', 'arqam', 'arqam', 'arqam', 'arqam', 'ghazanfar', 'ghazanfar', 'ghazanfar', 'ghazanfar', 'ghazanfar', 'ghazanfar', '']\n",
            "        label             video\n",
            "0       abdul      abdul_s1.mp4\n",
            "1       abdul      abdul_s2.mp4\n",
            "2       abdul      abdul_s4.mp4\n",
            "3       abdul      abdul_s3.mp4\n",
            "4       abdul      abdul_s6.mp4\n",
            "5       abdul      abdul_s5.mp4\n",
            "6       arham      arham_s4.mp4\n",
            "7       arham      arham_s1.mp4\n",
            "8       arham      arham_s2.mp4\n",
            "9       arham      arham_s3.mp4\n",
            "10      arham      arham_s6.mp4\n",
            "11      arham      arham_s5.mp4\n",
            "12      arqam      arqam_s2.mp4\n",
            "13      arqam      arqam_s3.mp4\n",
            "14      arqam      arqam_s1.mp4\n",
            "15      arqam      arqam_s6.mp4\n",
            "16      arqam      arqam_s5.mp4\n",
            "17      arqam      arqam_s4.mp4\n",
            "18  ghazanfar  ghazanfar_s3.mp4\n",
            "19  ghazanfar  ghazanfar_s2.mp4\n",
            "20  ghazanfar  ghazanfar_s1.mp4\n",
            "21  ghazanfar  ghazanfar_s6.mp4\n",
            "22  ghazanfar  ghazanfar_s5.mp4\n",
            "23  ghazanfar  ghazanfar_s4.mp4\n",
            "/content/drive/MyDrive/videos_ppg/videos/abdul/abdul_s1.mp4\n",
            "length 24\n"
          ]
        },
        {
          "data": {
            "text/plain": [
              "<Figure size 432x288 with 0 Axes>"
            ]
          },
          "metadata": {},
          "output_type": "display_data"
        }
      ],
      "source": [
        "\n",
        "with open(\"/content/drive/MyDrive/videos_ppg/persons2.txt\", \"r\") as f:\n",
        "  categories = [s.strip() for s in f.readlines()]\n",
        "\n",
        "print(categories)\n",
        "# a temporary list to store the string labels\n",
        "trues = []\n",
        "outputs = []\n",
        "\n",
        "# dictionary that maps integer to its string value \n",
        "label_dict = {}\n",
        "\n",
        "# list to store integer labels \n",
        "int_labels = []\n",
        "\n",
        "for i in range(len(categories)):\n",
        "    label_dict[i] = categories[i]\n",
        "    int_labels.append(i)\n",
        "\n",
        "\n",
        "criterion = nn.SmoothL1Loss()\n",
        "\n",
        "def tens(sample):\n",
        "  size = sample.shape[2]\n",
        " \n",
        "  print('size',size)\n",
        "  model.conv1 = nn.Conv2d(size, 64, kernel_size=7, stride=2, padding=3,bias=False)\n",
        "  model.fc = nn.Linear(512,24)\n",
        "  preprocess = transforms.Compose([\n",
        "      transforms.ToTensor(),\n",
        "\n",
        "      transforms.Normalize(mean=np.full((size), 0.485, dtype=float), std=np.full((size), 0.229, dtype=float)),\n",
        "  ])\n",
        "\n",
        "  input_tensor = preprocess(sample)\n",
        "  print('input_tenor',input_tensor)\n",
        "  return input_tensor\n",
        "\n",
        "ppg_dataset = PpgFramesDataset(csv_file='data_records.csv',\n",
        "                                    root_dir=\"\")\n",
        "\n",
        "fig = plt.figure()\n",
        "print('length',len(ppg_dataset))\n",
        "#pickle.dump(model,open('model_pkl','wb'))\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "35NP95HEpXcg",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "35NP95HEpXcg",
        "outputId": "9f7f307c-ce9f-420d-9471-34f86eee4041"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "video    abdul_s1.mp4\n",
            "Name: 0, dtype: object\n",
            "0 (224, 256, 5691) (1, 1)\n",
            "5691\n",
            "abdul\n",
            "size 5691\n",
            "input_tenor tensor([[[-2.1008, -2.1008, -2.0837,  ..., -2.0837, -2.0665, -2.0837],\n",
            "         [-2.0837, -2.1008, -2.0837,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-2.0837, -2.1008, -2.1008,  ..., -2.1008, -2.0494, -2.1008],\n",
            "         ...,\n",
            "         [-2.1179, -2.1008, -2.1008,  ..., -2.0665, -2.0837, -2.0837],\n",
            "         [-2.1179, -2.1008, -2.1179,  ..., -2.0837, -2.0665, -2.0665],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.0837, -2.0837, -2.0837]],\n",
            "\n",
            "        [[-1.5014, -1.5014, -1.4843,  ..., -1.8097, -1.7925, -1.7925],\n",
            "         [-1.4843, -1.5014, -1.4843,  ..., -1.8097, -1.8097, -1.7925],\n",
            "         [-1.4843, -1.5014, -1.5014,  ..., -1.8268, -1.7754, -1.8097],\n",
            "         ...,\n",
            "         [-1.5185, -1.5014, -1.5014,  ..., -1.7925, -1.8097, -1.8097],\n",
            "         [-1.5185, -1.5014, -1.5185,  ..., -1.8097, -1.7925, -1.7925],\n",
            "         [-1.5185, -1.5357, -1.5357,  ..., -1.8097, -1.8097, -1.8097]],\n",
            "\n",
            "        [[ 1.6495,  1.6495,  1.6667,  ...,  1.0673,  1.0673,  1.0159],\n",
            "         [ 1.6667,  1.6495,  1.6667,  ...,  1.0673,  1.0502,  1.0159],\n",
            "         [ 1.6667,  1.6495,  1.6495,  ...,  1.0502,  1.0844,  0.9988],\n",
            "         ...,\n",
            "         [ 1.6324,  1.6495,  1.6495,  ...,  1.0844,  1.0673,  1.0673],\n",
            "         [ 1.6324,  1.6495,  1.6324,  ...,  1.0673,  1.0844,  1.0844],\n",
            "         [ 1.6324,  1.6153,  1.6153,  ...,  1.0673,  1.0673,  1.0673]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-0.9020, -0.9020, -0.9020,  ..., -1.5528, -1.5528, -1.5528],\n",
            "         [-0.9020, -0.9020, -0.9020,  ..., -1.5528, -1.5528, -1.5528],\n",
            "         [-0.9020, -0.9020, -0.9020,  ..., -1.5528, -1.5528, -1.5528],\n",
            "         ...,\n",
            "         [-0.9705, -0.9705, -0.9705,  ..., -1.6042, -1.6042, -1.6042],\n",
            "         [-0.9705, -0.9705, -0.9705,  ..., -1.6042, -1.6042, -1.6042],\n",
            "         [-0.9877, -0.9877, -0.9877,  ..., -1.6213, -1.6213, -1.6213]],\n",
            "\n",
            "        [[ 2.0263,  2.0263,  2.0263,  ...,  1.2557,  1.2557,  1.2557],\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.2557,  1.2557,  1.2557],\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.2557,  1.2557,  1.2557],\n",
            "         ...,\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 1.9578,  1.9578,  1.9578,  ...,  1.1529,  1.1529,  1.1529]]])\n",
            "torch.Size([1, 5691, 224, 256])\n",
            "tensor([ 6.4414,  3.8476,  1.3107,  0.7566,  2.7971,  1.2442,  2.3199, -3.2752,\n",
            "         5.6147,  6.0276,  1.8070, -7.1180, -5.7541,  0.2899, -1.4235,  1.2666,\n",
            "        -2.1253, -2.7765,  4.9671,  0.8460,  0.1525, -3.7366,  3.0220,  0.0183])\n",
            "tensor(11.0878)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.3972, 0.2626, 0.1738, 0.0909, 0.0297]),\n",
            "indices=tensor([ 0,  9,  8, 18,  1]))\n",
            "true\n",
            "abdul 0.39718687534332275\n",
            "arham 0.26258671283721924\n",
            "arham 0.17377129197120667\n",
            "ghazanfar 0.09093323349952698\n",
            "abdul 0.029684128239750862\n",
            "video    abdul_s2.mp4\n",
            "Name: 1, dtype: object\n",
            "1 (224, 256, 5688) (1, 1)\n",
            "5688\n",
            "abdul\n",
            "size 5688\n",
            "input_tenor tensor([[[-2.1008, -2.1008, -2.0837,  ..., -2.0837, -2.0665, -2.0837],\n",
            "         [-2.0837, -2.1008, -2.0837,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-2.0837, -2.1008, -2.1008,  ..., -2.1008, -2.0494, -2.1008],\n",
            "         ...,\n",
            "         [-2.1179, -2.1008, -2.1008,  ..., -2.0665, -2.0837, -2.0837],\n",
            "         [-2.1179, -2.1008, -2.1179,  ..., -2.0837, -2.0665, -2.0665],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.0837, -2.0837, -2.0837]],\n",
            "\n",
            "        [[-1.5014, -1.5014, -1.4843,  ..., -1.8097, -1.7925, -1.7925],\n",
            "         [-1.4843, -1.5014, -1.4843,  ..., -1.8097, -1.8097, -1.7925],\n",
            "         [-1.4843, -1.5014, -1.5014,  ..., -1.8268, -1.7754, -1.8097],\n",
            "         ...,\n",
            "         [-1.5185, -1.5014, -1.5014,  ..., -1.7925, -1.8097, -1.8097],\n",
            "         [-1.5185, -1.5014, -1.5185,  ..., -1.8097, -1.7925, -1.7925],\n",
            "         [-1.5185, -1.5357, -1.5357,  ..., -1.8097, -1.8097, -1.8097]],\n",
            "\n",
            "        [[ 1.6495,  1.6495,  1.6667,  ...,  1.0673,  1.0673,  1.0159],\n",
            "         [ 1.6667,  1.6495,  1.6667,  ...,  1.0673,  1.0502,  1.0159],\n",
            "         [ 1.6667,  1.6495,  1.6495,  ...,  1.0502,  1.0844,  0.9988],\n",
            "         ...,\n",
            "         [ 1.6324,  1.6495,  1.6495,  ...,  1.0844,  1.0673,  1.0673],\n",
            "         [ 1.6324,  1.6495,  1.6324,  ...,  1.0673,  1.0844,  1.0844],\n",
            "         [ 1.6324,  1.6153,  1.6153,  ...,  1.0673,  1.0673,  1.0673]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-0.9020, -0.9020, -0.9020,  ..., -1.5528, -1.5528, -1.5528],\n",
            "         [-0.9020, -0.9020, -0.9020,  ..., -1.5528, -1.5528, -1.5528],\n",
            "         [-0.9020, -0.9020, -0.9020,  ..., -1.5528, -1.5528, -1.5528],\n",
            "         ...,\n",
            "         [-0.9705, -0.9705, -0.9705,  ..., -1.6042, -1.6042, -1.6042],\n",
            "         [-0.9705, -0.9705, -0.9705,  ..., -1.6042, -1.6042, -1.6042],\n",
            "         [-0.9877, -0.9877, -0.9877,  ..., -1.6213, -1.6213, -1.6213]],\n",
            "\n",
            "        [[ 2.0263,  2.0263,  2.0263,  ...,  1.2557,  1.2557,  1.2557],\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.2557,  1.2557,  1.2557],\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.2557,  1.2557,  1.2557],\n",
            "         ...,\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 1.9578,  1.9578,  1.9578,  ...,  1.1529,  1.1529,  1.1529]]])\n",
            "torch.Size([1, 5688, 224, 256])\n",
            "tensor([-0.6658, -3.0815, -0.1437,  1.2111,  1.4256,  1.9585,  3.8757, -1.4961,\n",
            "        -2.3176,  0.9009,  4.6767,  0.9202,  1.9852,  4.8550,  1.5065, -2.3093,\n",
            "         3.4816,  2.0167, -1.8689,  2.8365, -0.0837, -1.7753,  2.4891,  3.7988])\n",
            "tensor(9.9941)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.2970, 0.2485, 0.1115, 0.1033, 0.0752]),\n",
            "indices=tensor([13, 10,  6, 23, 16]))\n",
            "false\n",
            "arqam 0.29701510071754456\n",
            "arham 0.24849079549312592\n",
            "arham 0.1115427240729332\n",
            " 0.10329383611679077\n",
            "arqam 0.07521604746580124\n",
            "video    abdul_s4.mp4\n",
            "Name: 2, dtype: object\n",
            "2 (224, 256, 5724) (1, 1)\n",
            "5724\n",
            "abdul\n",
            "size 5724\n",
            "input_tenor tensor([[[-1.9295, -1.9295, -1.9295,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-1.9295, -1.9467, -1.9295,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-1.9295, -1.9467, -1.9295,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         ...,\n",
            "         [-1.9638, -1.9638, -1.9638,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.9638, -1.9638, -1.9809,  ..., -2.0152, -2.0152, -1.9980],\n",
            "         [-1.9638, -1.9638, -1.9809,  ..., -2.0152, -2.0152, -2.0323]],\n",
            "\n",
            "        [[-1.6384, -1.6384, -1.6384,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.6384, -1.6555, -1.6384,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.6384, -1.6555, -1.6384,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         ...,\n",
            "         [-1.6213, -1.6213, -1.6213,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.6213, -1.6213, -1.6384,  ..., -1.9124, -1.9124, -1.8953],\n",
            "         [-1.6213, -1.6213, -1.6213,  ..., -1.9124, -1.9124, -1.9295]],\n",
            "\n",
            "        [[ 1.8379,  1.8379,  1.8379,  ...,  1.1872,  1.1872,  1.1872],\n",
            "         [ 1.8379,  1.8208,  1.8550,  ...,  1.1872,  1.1872,  1.1872],\n",
            "         [ 1.8379,  1.8208,  1.8379,  ...,  1.1872,  1.1872,  1.1872],\n",
            "         ...,\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 1.8379,  1.8379,  1.8208,  ...,  1.1187,  1.1187,  1.1358],\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.1187,  1.1187,  1.1015]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.8439, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7925, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.1633,  2.1462,  2.1462,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.1462,  2.1462,  2.1462,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.1290,  2.1462,  2.1462,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         ...,\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.3927,  1.3927,  1.3927],\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.3755,  1.3755,  1.3755]]])\n",
            "torch.Size([1, 5724, 224, 256])\n",
            "tensor([-0.0101, -2.8291,  0.9848, -2.5944,  1.9220, -0.4873, -0.9990, -1.0158,\n",
            "        -2.6028,  1.1811, -0.6658,  1.5469, -3.9226,  3.2819,  4.0150,  0.3285,\n",
            "        -3.6823, -1.0315, -1.0592, -2.8916, -3.2925,  3.5254, -1.5655,  0.7673])\n",
            "tensor(11.4828)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.3927, 0.2407, 0.1887, 0.0484, 0.0333]),\n",
            "indices=tensor([14, 21, 13,  4, 11]))\n",
            "false\n",
            "arqam 0.39270278811454773\n",
            "ghazanfar 0.24067825078964233\n",
            "arqam 0.1886609047651291\n",
            "abdul 0.04842609539628029\n",
            "arham 0.03327977657318115\n",
            "video    abdul_s3.mp4\n",
            "Name: 3, dtype: object\n",
            "3 (224, 256, 5688) (1, 1)\n",
            "5688\n",
            "abdul\n",
            "size 5688\n",
            "input_tenor tensor([[[-2.0837, -2.1008, -2.1008,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1008, -2.0665, -2.0665],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         ...,\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.0665, -2.0665, -2.0665]],\n",
            "\n",
            "        [[-2.1008, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1008, -2.1008, -2.0837,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         ...,\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 1.7352,  1.7180,  1.7180,  ...,  1.1358,  1.1358,  1.1358],\n",
            "         [ 1.7180,  1.7180,  1.6838,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 1.7352,  1.7352,  1.7009,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         ...,\n",
            "         [ 1.8037,  1.8037,  1.8037,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 1.8037,  1.8037,  1.8037,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 1.8037,  1.8037,  1.8037,  ...,  1.2214,  1.2214,  1.2214]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 1.8722,  1.8722,  1.8722,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         ...,\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.4098,  1.4098,  1.4098]]])\n",
            "torch.Size([1, 5688, 224, 256])\n",
            "tensor([-3.3505, -2.8740,  4.2844,  0.7308,  0.5889, -6.0123, -1.5567, -4.6048,\n",
            "         2.1748,  0.4139,  3.3899, -4.0435,  2.1848,  1.1857,  1.0565,  2.4681,\n",
            "         3.3912, -0.0758,  1.8287,  1.1587, -6.2601,  0.1415,  1.4877, -1.5139])\n",
            "tensor(11.3490)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.3831, 0.1568, 0.1566, 0.0623, 0.0469]),\n",
            "indices=tensor([ 2, 16, 10, 15, 12]))\n",
            "true\n",
            "abdul 0.3830574154853821\n",
            "arqam 0.1567945033311844\n",
            "arham 0.15659016370773315\n",
            "arqam 0.06229481101036072\n",
            "arqam 0.046923283487558365\n",
            "video    abdul_s6.mp4\n",
            "Name: 4, dtype: object\n",
            "4 (224, 256, 5688) (1, 1)\n",
            "5688\n",
            "abdul\n",
            "size 5688\n",
            "input_tenor tensor([[[-2.1179, -2.1179, -2.1179,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1008, -2.1179, -2.1179,  ..., -2.1008, -2.1179, -2.0837],\n",
            "         [-2.1179, -2.1179, -2.1008,  ..., -2.1008, -2.0837, -2.0837],\n",
            "         ...,\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.0494, -2.0494, -2.0665],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.0665, -2.0665, -2.0665]],\n",
            "\n",
            "        [[-0.4397, -0.4397, -0.4397,  ..., -0.9877, -0.9877, -0.9877],\n",
            "         [-0.4226, -0.4397, -0.4568,  ..., -0.9877, -1.0048, -0.9705],\n",
            "         [-0.4397, -0.4397, -0.4226,  ..., -0.9877, -0.9705, -0.9705],\n",
            "         ...,\n",
            "         [-0.5596, -0.5596, -0.5596,  ..., -1.1075, -1.1075, -1.1247],\n",
            "         [-0.5596, -0.5596, -0.5596,  ..., -1.1247, -1.1247, -1.1247],\n",
            "         [-0.5767, -0.5767, -0.5767,  ..., -1.1247, -1.1247, -1.1247]],\n",
            "\n",
            "        [[ 1.6324,  1.6324,  1.6324,  ...,  1.0159,  1.0159,  1.0159],\n",
            "         [ 1.6495,  1.6324,  1.6153,  ...,  1.0159,  0.9988,  1.0331],\n",
            "         [ 1.6324,  1.6324,  1.6495,  ...,  1.0159,  1.0331,  1.0331],\n",
            "         ...,\n",
            "         [ 1.5810,  1.5810,  1.5810,  ...,  0.9303,  0.9303,  0.9132],\n",
            "         [ 1.5810,  1.5810,  1.5810,  ...,  0.9132,  0.9132,  0.9132],\n",
            "         [ 1.5639,  1.5639,  1.5639,  ...,  0.9132,  0.9132,  0.9132]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-0.3712, -0.3712, -0.3883,  ..., -1.0219, -1.0219, -1.0219],\n",
            "         [-0.3712, -0.3712, -0.3712,  ..., -1.0390, -1.0390, -1.0390],\n",
            "         [-0.3712, -0.3712, -0.3712,  ..., -1.0390, -1.0390, -1.0390],\n",
            "         ...,\n",
            "         [-0.4739, -0.4739, -0.4739,  ..., -1.1418, -1.1418, -1.1418],\n",
            "         [-0.4911, -0.4911, -0.4911,  ..., -1.1418, -1.1418, -1.1418],\n",
            "         [-0.4911, -0.4911, -0.4911,  ..., -1.1247, -1.1247, -1.1247]],\n",
            "\n",
            "        [[ 1.8722,  1.8722,  1.8550,  ...,  1.1872,  1.1872,  1.1872],\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         ...,\n",
            "         [ 1.7180,  1.7180,  1.7180,  ...,  1.0331,  1.0331,  1.0331],\n",
            "         [ 1.7523,  1.7523,  1.7523,  ...,  1.0331,  1.0331,  1.0331],\n",
            "         [ 1.7523,  1.7523,  1.7523,  ...,  1.0159,  1.0159,  1.0159]]])\n",
            "torch.Size([1, 5688, 224, 256])\n",
            "tensor([ 1.2441,  0.0266, -0.2475, -0.0534,  0.2998,  1.7098, -0.7376, -0.7489,\n",
            "         0.8777, -0.2838, -0.3182, -0.1789,  1.5164, -0.5569,  1.8646,  0.5377,\n",
            "         0.7044,  0.1683,  0.8194,  0.5862,  1.0644,  1.3548,  0.4336,  0.2637])\n",
            "tensor(10.6726)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.1318, 0.1129, 0.0931, 0.0792, 0.0709]),\n",
            "indices=tensor([14,  5, 12, 21,  0]))\n",
            "false\n",
            "arqam 0.13180236518383026\n",
            "abdul 0.11289761960506439\n",
            "arqam 0.093050017952919\n",
            "ghazanfar 0.07916493713855743\n",
            "abdul 0.07086684554815292\n",
            "video    abdul_s5.mp4\n",
            "Name: 5, dtype: object\n",
            "5 (224, 256, 5832) (1, 1)\n",
            "5832\n",
            "abdul\n",
            "size 5832\n",
            "input_tenor tensor([[[-2.1008, -2.1008, -2.1008,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         ...,\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.0837, -2.0837, -2.0837]],\n",
            "\n",
            "        [[-1.1075, -1.1075, -1.1075,  ..., -1.4843, -1.4843, -1.4843],\n",
            "         [-1.1075, -1.1075, -1.1075,  ..., -1.4843, -1.4843, -1.4843],\n",
            "         [-1.0904, -1.0904, -1.0904,  ..., -1.4843, -1.4843, -1.4843],\n",
            "         ...,\n",
            "         [-1.1760, -1.1760, -1.1760,  ..., -1.5699, -1.5699, -1.5699],\n",
            "         [-1.1760, -1.1760, -1.1760,  ..., -1.5699, -1.5699, -1.5699],\n",
            "         [-1.2103, -1.2103, -1.2103,  ..., -1.5870, -1.5870, -1.5870]],\n",
            "\n",
            "        [[ 1.7865,  1.7865,  1.7865,  ...,  1.2214,  1.2214,  1.2214],\n",
            "         [ 1.7865,  1.7865,  1.7865,  ...,  1.2214,  1.2214,  1.2214],\n",
            "         [ 1.8037,  1.8037,  1.8037,  ...,  1.2214,  1.2214,  1.2214],\n",
            "         ...,\n",
            "         [ 1.6838,  1.6838,  1.6838,  ...,  1.1187,  1.1187,  1.1187],\n",
            "         [ 1.6838,  1.6838,  1.6838,  ...,  1.1187,  1.1187,  1.1187],\n",
            "         [ 1.6495,  1.6495,  1.6495,  ...,  1.1015,  1.1015,  1.1015]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.3130, -1.3130, -1.3130,  ..., -1.7069, -1.7069, -1.7069],\n",
            "         [-1.3130, -1.3130, -1.3130,  ..., -1.7069, -1.7069, -1.7069],\n",
            "         [-1.3130, -1.3130, -1.3130,  ..., -1.7069, -1.7069, -1.7069],\n",
            "         ...,\n",
            "         [-1.4672, -1.4672, -1.4672,  ..., -1.8782, -1.8782, -1.8782],\n",
            "         [-1.4843, -1.4843, -1.4843,  ..., -1.8782, -1.8782, -1.8782],\n",
            "         [-1.4672, -1.4672, -1.4672,  ..., -1.8782, -1.8782, -1.8782]],\n",
            "\n",
            "        [[ 2.0777,  2.0777,  2.0777,  ...,  1.5125,  1.5125,  1.5125],\n",
            "         [ 2.0777,  2.0777,  2.0777,  ...,  1.5125,  1.5125,  1.5125],\n",
            "         [ 2.0777,  2.0777,  2.0777,  ...,  1.5125,  1.5125,  1.5125],\n",
            "         ...,\n",
            "         [ 1.8037,  1.8037,  1.8037,  ...,  1.2385,  1.2385,  1.2385],\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.2385,  1.2385,  1.2385],\n",
            "         [ 1.8037,  1.8037,  1.8037,  ...,  1.2385,  1.2385,  1.2385]]])\n",
            "torch.Size([1, 5832, 224, 256])\n",
            "tensor([ 1.0820, -1.1147, -1.1196,  1.9269, -3.5745,  4.2298, -0.7337, -0.1238,\n",
            "         0.3008, -1.0897, -4.5751, -2.1393,  6.0126, -1.4889,  3.4255,  1.5621,\n",
            "         0.7072,  0.4609, -0.6016,  1.7899, -1.7669,  0.2773, -3.7848,  4.3094])\n",
            "tensor(10.9258)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.6668, 0.1214, 0.1121, 0.0502, 0.0112]),\n",
            "indices=tensor([12, 23,  5, 14,  3]))\n",
            "false\n",
            "arqam 0.6667613983154297\n",
            " 0.12142475694417953\n",
            "abdul 0.11213234066963196\n",
            "arqam 0.050165604799985886\n",
            "abdul 0.011209907941520214\n",
            "video    arham_s4.mp4\n",
            "Name: 6, dtype: object\n",
            "6 (224, 256, 5754) (1, 1)\n",
            "5754\n",
            "arham\n",
            "size 5754\n",
            "input_tenor tensor([[[-1.9638, -1.9809, -1.9980,  ..., -2.0494, -2.0494, -2.0665],\n",
            "         [-1.9809, -1.9980, -1.9980,  ..., -2.0494, -2.0494, -2.0665],\n",
            "         [-1.9809, -1.9980, -1.9980,  ..., -2.0494, -2.0494, -2.0665],\n",
            "         ...,\n",
            "         [-1.9980, -1.9809, -1.9809,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         [-1.9980, -1.9980, -1.9980,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-1.9980, -1.9980, -1.9980,  ..., -2.0494, -2.0494, -2.0494]],\n",
            "\n",
            "        [[-1.6898, -1.6898, -1.6898,  ..., -1.9295, -1.9295, -1.9467],\n",
            "         [-1.6898, -1.7069, -1.7069,  ..., -1.9295, -1.9295, -1.9467],\n",
            "         [-1.6898, -1.7069, -1.7069,  ..., -1.9295, -1.9295, -1.9467],\n",
            "         ...,\n",
            "         [-1.7240, -1.7069, -1.7069,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.7240, -1.7240, -1.7240,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         [-1.7069, -1.7069, -1.7069,  ..., -1.9638, -1.9638, -1.9638]],\n",
            "\n",
            "        [[ 2.0948,  2.0263,  1.9749,  ...,  1.4098,  1.4098,  1.3927],\n",
            "         [ 2.0434,  2.0092,  2.0092,  ...,  1.4098,  1.4098,  1.3927],\n",
            "         [ 2.0263,  2.0092,  2.0092,  ...,  1.4098,  1.4098,  1.3927],\n",
            "         ...,\n",
            "         [ 1.9407,  1.9578,  1.9578,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 1.9407,  1.9407,  1.9407,  ...,  1.3070,  1.3070,  1.3070],\n",
            "         [ 1.9235,  1.9235,  1.9235,  ...,  1.3413,  1.3413,  1.3413]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.9467, -1.9467, -1.9467,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9467, -1.9467, -1.9467,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9467, -1.9467, -1.9467,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2318,  2.2318,  2.2318,  ...,  1.6838,  1.6838,  1.6838],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.6838,  1.6838,  1.6838],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.6838,  1.6838,  1.6838],\n",
            "         ...,\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.4783,  1.4783,  1.4783],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.4783,  1.4783,  1.4612],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.4783,  1.4783,  1.4783]]])\n",
            "torch.Size([1, 5754, 224, 256])\n",
            "tensor([-1.9933e+00,  2.2655e+00, -1.0295e+00,  1.5671e+00, -2.0186e-01,\n",
            "         3.5655e+00, -3.2076e+00, -5.3901e-01, -5.4996e-01, -1.4705e-01,\n",
            "         1.0546e+00, -7.1381e-01, -1.4619e+00, -1.0046e+00,  1.8139e-01,\n",
            "         5.8369e-01, -6.7824e-01, -1.2033e+00,  4.1490e-01,  3.5198e-01,\n",
            "        -7.2738e-01, -1.5584e+00,  9.4872e-04, -1.3597e+00])\n",
            "tensor(11.3717)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.5373, 0.1464, 0.0728, 0.0436, 0.0272]),\n",
            "indices=tensor([ 5,  1,  3, 10, 15]))\n",
            "false\n",
            "abdul 0.5372838973999023\n",
            "abdul 0.14643393456935883\n",
            "abdul 0.07283519953489304\n",
            "arham 0.043625395745038986\n",
            "arqam 0.02724205143749714\n",
            "video    arham_s1.mp4\n",
            "Name: 7, dtype: object\n",
            "7 (224, 256, 5658) (1, 1)\n",
            "5658\n",
            "arham\n",
            "size 5658\n",
            "input_tenor tensor([[[-1.3473, -1.3302, -1.3130,  ..., -1.5699, -1.5699, -1.5699],\n",
            "         [-1.3130, -1.3130, -1.3130,  ..., -1.5699, -1.5699, -1.5699],\n",
            "         [-1.3302, -1.3130, -1.3130,  ..., -1.6042, -1.6042, -1.6042],\n",
            "         ...,\n",
            "         [-1.1247, -1.1247, -1.1247,  ..., -1.5357, -1.5357, -1.5357],\n",
            "         [-1.0904, -1.0904, -1.0904,  ..., -1.5357, -1.5357, -1.5357],\n",
            "         [-1.1075, -1.1075, -1.1075,  ..., -1.5014, -1.5014, -1.5014]],\n",
            "\n",
            "        [[-2.0323, -2.0152, -1.9980,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-1.9980, -1.9980, -1.9980,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-2.0152, -1.9980, -1.9980,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         ...,\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.0152, -2.0152, -2.0152]],\n",
            "\n",
            "        [[ 1.9578,  1.9749,  1.9920,  ...,  1.5297,  1.5297,  1.5297],\n",
            "         [ 1.9920,  1.9920,  1.9920,  ...,  1.5297,  1.5297,  1.5297],\n",
            "         [ 1.9749,  1.9920,  1.9920,  ...,  1.4954,  1.4954,  1.4954],\n",
            "         ...,\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.5810,  1.5810,  1.5810],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.5810,  1.5810,  1.5810],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.5810,  1.5810,  1.5810]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-1.5014, -1.5014, -1.5014,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         [-1.4843, -1.5014, -1.5014,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         [-1.5014, -1.4843, -1.4843,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         ...,\n",
            "         [-1.2617, -1.2617, -1.2617,  ..., -1.7412, -1.7412, -1.7412],\n",
            "         [-1.2617, -1.2445, -1.2445,  ..., -1.7412, -1.7412, -1.7412],\n",
            "         [-1.2274, -1.2445, -1.2445,  ..., -1.7412, -1.7412, -1.7412]],\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0494, -2.0323, -2.0323,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0152, -2.0323, -2.0323,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.1975,  2.1975,  2.1975,  ...,  1.6324,  1.6324,  1.6324],\n",
            "         [ 2.2147,  2.1975,  2.1975,  ...,  1.6324,  1.6324,  1.6324],\n",
            "         [ 2.1975,  2.2147,  2.2147,  ...,  1.6324,  1.6324,  1.6324],\n",
            "         ...,\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.7352,  1.7352,  1.7352],\n",
            "         [ 2.2318,  2.2489,  2.2489,  ...,  1.7352,  1.7352,  1.7352],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.7694,  1.7694,  1.7694]]])\n",
            "torch.Size([1, 5658, 224, 256])\n",
            "tensor([ 1.4319, -1.6179, -0.9276, -0.8937, -1.0352,  0.1845,  0.5834, -0.7282,\n",
            "        -1.2692,  1.0467, -1.3414,  2.3278,  2.0180, -0.3311, -2.5046, -0.4920,\n",
            "        -1.1095, -3.8931, -0.8094,  1.9090,  3.3180, -0.9282,  0.3210,  1.7170])\n",
            "tensor(11.2453)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.3726, 0.1384, 0.1015, 0.0911, 0.0751]),\n",
            "indices=tensor([20, 11, 12, 19, 23]))\n",
            "false\n",
            "ghazanfar 0.37259334325790405\n",
            "arham 0.13840916752815247\n",
            "arqam 0.1015385314822197\n",
            "ghazanfar 0.09105122089385986\n",
            " 0.07514803856611252\n",
            "video    arham_s2.mp4\n",
            "Name: 8, dtype: object\n",
            "8 (224, 256, 5781) (1, 1)\n",
            "5781\n",
            "arham\n",
            "size 5781\n",
            "input_tenor tensor([[[-2.0837, -2.0837, -2.0665,  ..., -2.1179, -2.1008, -2.1008],\n",
            "         [-2.0837, -2.0665, -2.0665,  ..., -2.1179, -2.1008, -2.1179],\n",
            "         [-2.0837, -2.0665, -2.0665,  ..., -2.1008, -2.1008, -2.1179],\n",
            "         ...,\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.1008, -2.1008, -2.1008]],\n",
            "\n",
            "        [[-2.0665, -2.0494, -2.0494,  ..., -2.1179, -2.1008, -2.1179],\n",
            "         [-2.0665, -2.0494, -2.0494,  ..., -2.1179, -2.1008, -2.1179],\n",
            "         [-2.0665, -2.0494, -2.0494,  ..., -2.1008, -2.1008, -2.1179],\n",
            "         ...,\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.0837, -2.0837, -2.0837]],\n",
            "\n",
            "        [[ 1.9064,  1.9235,  1.9235,  ...,  1.2728,  1.3242,  1.3755],\n",
            "         [ 1.9064,  1.9235,  1.9235,  ...,  1.3242,  1.3413,  1.3242],\n",
            "         [ 1.9064,  1.9235,  1.9235,  ...,  1.3413,  1.3242,  1.3242],\n",
            "         ...,\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.2043,  1.2043,  1.2043]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1008, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1008, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.1804,  2.1804,  2.1804,  ...,  1.4612,  1.4269,  1.4269],\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.4612,  1.4269,  1.4269],\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.4783,  1.4440,  1.4440],\n",
            "         ...,\n",
            "         [ 2.1119,  2.0948,  2.0948,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         [ 2.1119,  2.0948,  2.0948,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.3584,  1.3584,  1.3584]]])\n",
            "torch.Size([1, 5781, 224, 256])\n",
            "tensor([-0.7507, -0.7299,  0.5048,  2.3443,  3.3009, -0.3660, -3.7826, -0.6323,\n",
            "        -2.6312, -5.4035, -0.6872,  1.8391,  2.0130, -2.9103,  0.2556, -1.7448,\n",
            "         3.2868, -0.3171,  1.6380, -1.2627,  1.3736, -1.1167, -0.6094, -0.0420])\n",
            "tensor(11.2736)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.2827, 0.2787, 0.1086, 0.0780, 0.0655]),\n",
            "indices=tensor([ 4, 16,  3, 12, 11]))\n",
            "false\n",
            "abdul 0.28271329402923584\n",
            "arqam 0.2787381708621979\n",
            "abdul 0.10860946029424667\n",
            "arqam 0.07798723876476288\n",
            "arham 0.06553923338651657\n",
            "video    arham_s3.mp4\n",
            "Name: 9, dtype: object\n",
            "9 (224, 256, 5787) (1, 1)\n",
            "5787\n",
            "arham\n",
            "size 5787\n",
            "input_tenor tensor([[[-1.9809, -2.0152, -2.0323,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         [-2.0323, -2.0152, -1.9809,  ..., -2.0323, -2.0152, -2.0152],\n",
            "         [-1.9809, -2.0152, -2.0152,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         ...,\n",
            "         [-1.9980, -1.9980, -1.9980,  ..., -2.0323, -2.0323, -2.0494],\n",
            "         [-1.9809, -1.9980, -1.9980,  ..., -2.0152, -1.9980, -2.0323],\n",
            "         [-1.9980, -1.9980, -1.9980,  ..., -2.0323, -2.0494, -2.0323]],\n",
            "\n",
            "        [[-1.6213, -1.6555, -1.6727,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.6727, -1.6555, -1.6213,  ..., -1.9295, -1.9124, -1.9124],\n",
            "         [-1.6213, -1.6555, -1.6555,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         ...,\n",
            "         [-1.8097, -1.8097, -1.8097,  ..., -2.0152, -2.0323, -2.0323],\n",
            "         [-1.7925, -1.8097, -1.8097,  ..., -1.9980, -1.9809, -2.0152],\n",
            "         [-1.8097, -1.8097, -1.8097,  ..., -2.0323, -2.0323, -2.0152]],\n",
            "\n",
            "        [[ 2.1462,  2.1119,  2.0948,  ...,  1.4098,  1.3927,  1.4098],\n",
            "         [ 2.0948,  2.1119,  2.1462,  ...,  1.4440,  1.4612,  1.4612],\n",
            "         [ 2.1290,  2.1119,  2.1119,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         ...,\n",
            "         [ 1.7865,  1.7865,  1.7865,  ...,  1.1529,  1.1529,  1.1358],\n",
            "         [ 1.8037,  1.7865,  1.7865,  ...,  1.1700,  1.1872,  1.1529],\n",
            "         [ 1.7865,  1.7865,  1.7865,  ...,  1.1700,  1.1529,  1.1529]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.8439, -1.8439, -1.8439,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-1.9638, -1.9638, -1.9638,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9638, -1.9638, -1.9638,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9638, -1.9638, -1.9638,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2147,  2.2147,  2.2147,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         ...,\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.3413,  1.3413,  1.3413]]])\n",
            "torch.Size([1, 5787, 224, 256])\n",
            "tensor([-6.4922,  5.9470, -3.8690,  1.3968,  1.8573,  3.1953, -5.0601, -0.7543,\n",
            "         1.6953, -0.5226,  5.6370, -2.6313,  0.9333,  0.6417,  3.5442,  4.1722,\n",
            "        -0.8661, -0.3235, -3.9022,  0.7160, -0.3456, -0.5125, -3.8766, -2.8225])\n",
            "tensor(11.5057)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.4705, 0.3451, 0.0798, 0.0426, 0.0300]),\n",
            "indices=tensor([ 1, 10, 15, 14,  5]))\n",
            "false\n",
            "abdul 0.470480352640152\n",
            "arham 0.3450832664966583\n",
            "arqam 0.07975345849990845\n",
            "arqam 0.04256229102611542\n",
            "abdul 0.030025308951735497\n",
            "video    arham_s6.mp4\n",
            "Name: 10, dtype: object\n",
            "10 (224, 256, 5589) (1, 1)\n",
            "5589\n",
            "arham\n",
            "size 5589\n",
            "input_tenor tensor([[[-1.3302, -1.3130, -1.3130,  ..., -1.6384, -1.6384, -1.6555],\n",
            "         [-1.3130, -1.3130, -1.3302,  ..., -1.6213, -1.6213, -1.6384],\n",
            "         [-1.3130, -1.3130, -1.3130,  ..., -1.6213, -1.6213, -1.6384],\n",
            "         ...,\n",
            "         [-1.3473, -1.3473, -1.3473,  ..., -1.7240, -1.7583, -1.7240],\n",
            "         [-1.3302, -1.3473, -1.3473,  ..., -1.7412, -1.7069, -1.7240],\n",
            "         [-1.3130, -1.3130, -1.3130,  ..., -1.7240, -1.6727, -1.6898]],\n",
            "\n",
            "        [[-1.5185, -1.5014, -1.5014,  ..., -1.7240, -1.7240, -1.7412],\n",
            "         [-1.5014, -1.5014, -1.5185,  ..., -1.7583, -1.7754, -1.7754],\n",
            "         [-1.5014, -1.5014, -1.5014,  ..., -1.7583, -1.7583, -1.7754],\n",
            "         ...,\n",
            "         [-1.4843, -1.4843, -1.4843,  ..., -1.8268, -1.8439, -1.8268],\n",
            "         [-1.4672, -1.4843, -1.4843,  ..., -1.8439, -1.8097, -1.8439],\n",
            "         [-1.4843, -1.4843, -1.4843,  ..., -1.8268, -1.8097, -1.8268]],\n",
            "\n",
            "        [[ 2.0263,  2.0434,  2.0434,  ...,  1.5297,  1.5297,  1.5125],\n",
            "         [ 2.0434,  2.0434,  2.0263,  ...,  1.5125,  1.5125,  1.4954],\n",
            "         [ 2.0434,  2.0434,  2.0434,  ...,  1.5125,  1.5125,  1.4954],\n",
            "         ...,\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.3584,  1.3242,  1.3584],\n",
            "         [ 2.0434,  2.0263,  2.0263,  ...,  1.3413,  1.3755,  1.3755],\n",
            "         [ 2.0263,  2.0263,  2.0263,  ...,  1.3584,  1.3755,  1.3584]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-1.5528, -1.5528, -1.5528,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.5699, -1.5699, -1.5699,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.5699, -1.5699, -1.5699,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         ...,\n",
            "         [-1.5870, -1.5870, -1.5870,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.5870, -1.5870, -1.5870,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.5870, -1.5870, -1.5870,  ..., -1.9980, -1.9980, -1.9980]],\n",
            "\n",
            "        [[-1.6042, -1.6042, -1.6042,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         [-1.6213, -1.6213, -1.6213,  ..., -1.9809, -1.9638, -1.9638],\n",
            "         [-1.6213, -1.6213, -1.6213,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         ...,\n",
            "         [-1.6555, -1.6555, -1.6555,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-1.6555, -1.6555, -1.6555,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-1.6555, -1.6555, -1.6555,  ..., -2.0323, -2.0323, -2.0323]],\n",
            "\n",
            "        [[ 2.2318,  2.2318,  2.2318,  ...,  1.5468,  1.5468,  1.5468],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.5468,  1.5639,  1.5639],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.5639,  1.5639,  1.5639],\n",
            "         ...,\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.4954,  1.4954,  1.4954],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.4954,  1.4954,  1.4954],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.4954,  1.4954,  1.4954]]])\n",
            "torch.Size([1, 5589, 224, 256])\n",
            "tensor([ 2.2016, -3.0210, -2.2160, -0.2785,  0.3793, -2.3412,  0.4776,  1.4701,\n",
            "         1.1568, -2.0310,  2.7416, -0.1169,  2.6042, -0.2320, -3.1655,  4.7129,\n",
            "         0.6275,  3.1148, -2.4067, -3.4165,  2.8046,  0.7877,  2.7822, -3.5675])\n",
            "tensor(11.0556)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.5009, 0.1013, 0.0743, 0.0727, 0.0698]),\n",
            "indices=tensor([15, 17, 20, 22, 10]))\n",
            "false\n",
            "arqam 0.5009047985076904\n",
            "ghazanfar 0.10132713615894318\n",
            "ghazanfar 0.07430415600538254\n",
            "ghazanfar 0.0726575255393982\n",
            "arham 0.0697658434510231\n",
            "video    arham_s5.mp4\n",
            "Name: 11, dtype: object\n",
            "11 (224, 256, 5751) (1, 1)\n",
            "5751\n",
            "arham\n",
            "size 5751\n",
            "input_tenor tensor([[[-2.0323, -2.0323, -2.0323,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         ...,\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.0665, -2.0665, -2.0494],\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.0837, -2.0837, -2.0665],\n",
            "         [-2.0323, -2.0323, -2.0323,  ..., -2.0494, -2.0494, -2.0323]],\n",
            "\n",
            "        [[-1.6384, -1.6384, -1.6384,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.6384, -1.6384, -1.6384,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.6384, -1.6384, -1.6384,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         ...,\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9638, -1.9638, -1.9467],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9809, -1.9809, -1.9638],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9980, -1.9980, -1.9809]],\n",
            "\n",
            "        [[ 2.2489,  2.2489,  2.2489,  ...,  1.5639,  1.5639,  1.5639],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.5639,  1.5639,  1.5639],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.5639,  1.5639,  1.5639],\n",
            "         ...,\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.3070,  1.3070,  1.3242],\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.2899,  1.2899,  1.3070],\n",
            "         [ 1.8379,  1.8379,  1.8379,  ...,  1.2899,  1.2899,  1.3070]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2318,  2.2318,  2.2318,  ...,  1.5810,  1.5810,  1.5810],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.5810,  1.5982,  1.5982],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.5810,  1.5982,  1.5982],\n",
            "         ...,\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.3927,  1.3927,  1.3927]]])\n",
            "torch.Size([1, 5751, 224, 256])\n",
            "tensor([-1.6626,  0.1885, -1.9939,  0.7273,  1.3963,  1.0355,  0.4632, -1.8222,\n",
            "        -1.1168,  1.1993, -1.5770,  0.9884, -1.6856, -2.8037,  0.2897, -1.9662,\n",
            "        -1.2302, -0.9469,  4.0659, -0.6602, -0.9315, -3.5199, -0.4774, -0.9984])\n",
            "tensor(11.5440)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.7164, 0.0496, 0.0408, 0.0346, 0.0330]),\n",
            "indices=tensor([18,  4,  9,  5, 11]))\n",
            "false\n",
            "ghazanfar 0.7164419293403625\n",
            "abdul 0.04963740333914757\n",
            "arham 0.040762826800346375\n",
            "abdul 0.034601956605911255\n",
            "arham 0.03301035985350609\n",
            "video    arqam_s2.mp4\n",
            "Name: 12, dtype: object\n",
            "12 (224, 256, 5709) (1, 1)\n",
            "5709\n",
            "arqam\n",
            "size 5709\n",
            "input_tenor tensor([[[-1.8782, -1.8782, -1.8782,  ..., -2.0323, -2.0152, -1.9809],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         ...,\n",
            "         [-1.9124, -1.9124, -1.9124,  ..., -1.9809, -1.9980, -1.9980],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -1.9638, -1.9638, -1.9638]],\n",
            "\n",
            "        [[-1.7412, -1.7412, -1.7412,  ..., -1.9638, -1.9467, -1.9124],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         ...,\n",
            "         [-1.8268, -1.8268, -1.8439,  ..., -1.9809, -1.9980, -1.9980],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.0152, -2.0152, -2.0152]],\n",
            "\n",
            "        [[ 1.9920,  1.9920,  1.9920,  ...,  1.4098,  1.4269,  1.4612],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.4783,  1.4783,  1.4783],\n",
            "         ...,\n",
            "         [ 1.8722,  1.8722,  1.8550,  ...,  1.3413,  1.3242,  1.3242],\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.3242,  1.3242,  1.3242]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.9809, -1.9809, -1.9809,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9809, -1.9809, -1.9809,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9809, -1.9809, -1.9809,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.0152, -2.0152, -2.0152,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0152, -2.0152, -2.0152,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0152, -2.0152, -2.0152,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.1633,  2.1633,  2.1633,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         [ 2.1633,  2.1633,  2.1633,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         [ 2.1633,  2.1633,  2.1633,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         ...,\n",
            "         [ 2.1119,  2.1119,  2.1119,  ...,  1.4440,  1.4440,  1.4440],\n",
            "         [ 2.1119,  2.1119,  2.1119,  ...,  1.4440,  1.4440,  1.4440],\n",
            "         [ 2.1119,  2.1119,  2.1119,  ...,  1.4783,  1.4783,  1.4783]]])\n",
            "torch.Size([1, 5709, 224, 256])\n",
            "tensor([-2.4224, -2.1664, -2.2964,  0.0076,  1.5653, -2.2331,  6.0647, -1.4040,\n",
            "        -3.0940, -0.8189, -0.6272, -3.1560,  1.7777,  1.0445, -0.2525, -1.8423,\n",
            "         1.4280, -0.0498, -1.5064, -5.0622,  0.4440, -0.6725, -0.2789,  0.2756])\n",
            "tensor(11.6601)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.9416, 0.0129, 0.0105, 0.0091, 0.0062]),\n",
            "indices=tensor([ 6, 12,  4, 16, 13]))\n",
            "false\n",
            "arham 0.9415923953056335\n",
            "arqam 0.012942655012011528\n",
            "abdul 0.010466109029948711\n",
            "arqam 0.009123331867158413\n",
            "arqam 0.006217666435986757\n",
            "video    arqam_s3.mp4\n",
            "Name: 13, dtype: object\n",
            "13 (224, 256, 5721) (1, 1)\n",
            "5721\n",
            "arqam\n",
            "size 5721\n",
            "input_tenor tensor([[[-1.8610, -1.8610, -1.8610,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.8610, -1.8610, -1.8782,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.8610, -1.8610, -1.8610,  ..., -1.9638, -1.9467, -1.9638],\n",
            "         ...,\n",
            "         [-1.9124, -1.9124, -1.9124,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         [-1.9295, -1.9295, -1.9295,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.8953, -1.9124, -1.9295,  ..., -1.9638, -1.9638, -1.9638]],\n",
            "\n",
            "        [[-1.6555, -1.6555, -1.6555,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.6384, -1.6384, -1.6555,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.6555, -1.6384, -1.6555,  ..., -1.9638, -1.9467, -1.9638],\n",
            "         ...,\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         [-1.7754, -1.7925, -1.7754,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         [-1.7412, -1.7583, -1.7754,  ..., -1.9809, -1.9809, -1.9809]],\n",
            "\n",
            "        [[ 1.8893,  1.8893,  1.8893,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 1.8722,  1.8722,  1.8550,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 1.8893,  1.8722,  1.8893,  ...,  1.3584,  1.3755,  1.3584],\n",
            "         ...,\n",
            "         [ 1.8037,  1.8037,  1.8037,  ...,  1.2728,  1.2728,  1.2728],\n",
            "         [ 1.7865,  1.7865,  1.7865,  ...,  1.2899,  1.2899,  1.2899],\n",
            "         [ 1.8208,  1.8037,  1.7865,  ...,  1.2728,  1.2728,  1.2728]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1008, -2.1008, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.9638, -1.9638, -1.9638,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9467, -1.9638, -1.9809,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9467, -1.9638, -1.9809,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.0152, -2.0323, -2.0323,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0152, -2.0323, -2.0323,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0152, -2.0152, -2.0152,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.1119,  2.1119,  2.1633,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         [ 2.1290,  2.1119,  2.1462,  ...,  1.4783,  1.4783,  1.4783],\n",
            "         [ 2.1290,  2.1119,  2.1462,  ...,  1.4783,  1.4783,  1.4783],\n",
            "         ...,\n",
            "         [ 1.9920,  1.9749,  1.9749,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 1.9920,  1.9749,  1.9749,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 1.9920,  1.9920,  1.9920,  ...,  1.4098,  1.4098,  1.4098]]])\n",
            "torch.Size([1, 5721, 224, 256])\n",
            "tensor([ 0.3434, -0.0111, -3.6758, -1.2265, -0.5434,  1.7785,  0.0399,  0.2459,\n",
            "         2.9273, -0.0808, -1.8518,  2.1167,  0.0916,  0.1627,  2.6596,  1.0592,\n",
            "         1.7718,  2.7044,  3.4058, -0.5761,  2.4034,  0.5718,  0.1820, -0.7667])\n",
            "tensor(10.4654)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.2410, 0.1493, 0.1195, 0.1143, 0.0884]),\n",
            "indices=tensor([18,  8, 17, 14, 20]))\n",
            "false\n",
            "ghazanfar 0.2409777194261551\n",
            "arham 0.14933088421821594\n",
            "ghazanfar 0.11949649453163147\n",
            "arqam 0.11426394432783127\n",
            "ghazanfar 0.08843994140625\n",
            "video    arqam_s1.mp4\n",
            "Name: 14, dtype: object\n",
            "14 (224, 256, 5673) (1, 1)\n",
            "5673\n",
            "arqam\n",
            "size 5673\n",
            "input_tenor tensor([[[-1.1760, -1.1932, -1.1932,  ..., -1.6555, -1.6555, -1.6727],\n",
            "         [-1.1760, -1.1760, -1.1760,  ..., -1.6555, -1.6555, -1.6555],\n",
            "         [-1.1760, -1.1760, -1.1760,  ..., -1.6555, -1.6555, -1.6555],\n",
            "         ...,\n",
            "         [-1.2274, -1.2274, -1.2274,  ..., -1.6384, -1.6384, -1.6384],\n",
            "         [-1.2274, -1.2274, -1.2274,  ..., -1.6213, -1.6213, -1.6213],\n",
            "         [-1.2445, -1.2445, -1.2445,  ..., -1.6384, -1.6384, -1.6384]],\n",
            "\n",
            "        [[-1.6727, -1.6898, -1.6898,  ..., -1.9467, -1.9638, -1.9638],\n",
            "         [-1.6727, -1.6727, -1.6727,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.6727, -1.6727, -1.6727,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         ...,\n",
            "         [-1.7069, -1.7069, -1.7069,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.7069, -1.7069, -1.7069,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.7240, -1.7240, -1.7240,  ..., -1.9638, -1.9638, -1.9638]],\n",
            "\n",
            "        [[ 2.2318,  2.2147,  2.2147,  ...,  1.4269,  1.4269,  1.4098],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.4269,  1.4269,  1.4269],\n",
            "         ...,\n",
            "         [ 2.1290,  2.1290,  2.1290,  ...,  1.4954,  1.4954,  1.4954],\n",
            "         [ 2.1290,  2.1290,  2.1290,  ...,  1.5125,  1.5125,  1.5125],\n",
            "         [ 2.1119,  2.1119,  2.1119,  ...,  1.5125,  1.5297,  1.5297]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-1.3644, -1.3644, -1.3644,  ..., -1.8439, -1.8439, -1.8439],\n",
            "         [-1.3644, -1.3644, -1.3644,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         [-1.3644, -1.3644, -1.3644,  ..., -1.8610, -1.8610, -1.8610],\n",
            "         ...,\n",
            "         [-1.4158, -1.4158, -1.4158,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         [-1.4158, -1.4158, -1.4158,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         [-1.4158, -1.4158, -1.4158,  ..., -1.8610, -1.8610, -1.8610]],\n",
            "\n",
            "        [[-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-1.9295, -1.9295, -1.9295,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9295, -1.9295, -1.9295,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.9295, -1.9295, -1.9295,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2147,  2.2147,  2.2147,  ...,  1.7009,  1.7009,  1.7009],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.6838,  1.6838,  1.6838],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.6838,  1.6838,  1.6838],\n",
            "         ...,\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.5810,  1.5810,  1.5810],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.5810,  1.5810,  1.5810],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.5982,  1.5982,  1.5982]]])\n",
            "torch.Size([1, 5673, 224, 256])\n",
            "tensor([ 1.1793, -0.6457,  0.9544, -2.3506, -3.4034, -2.2984,  3.3473, -0.6428,\n",
            "         1.3162, -1.5861,  4.3768, -4.2775,  2.2353,  1.8812, -0.2210,  2.4333,\n",
            "         2.8434, -3.9896,  3.8349, -0.8246,  2.0520, -0.2049,  2.1506, -1.5820])\n",
            "tensor(10.8242)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.3483, 0.2026, 0.1244, 0.0752, 0.0499]),\n",
            "indices=tensor([10, 18,  6, 16, 15]))\n",
            "false\n",
            "arham 0.34827563166618347\n",
            "ghazanfar 0.20257386565208435\n",
            "arham 0.12439640611410141\n",
            "arqam 0.0751587450504303\n",
            "arqam 0.049874432384967804\n",
            "video    arqam_s6.mp4\n",
            "Name: 15, dtype: object\n",
            "15 (224, 256, 6183) (1, 1)\n",
            "6183\n",
            "arqam\n",
            "size 6183\n",
            "input_tenor tensor([[[-1.9809, -1.9980, -1.9980,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         [-1.9980, -1.9980, -1.9980,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         [-1.9980, -1.9980, -1.9980,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         ...,\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-2.0494, -2.0665, -2.0494,  ..., -2.0837, -2.0665, -2.0665]],\n",
            "\n",
            "        [[-1.6898, -1.7069, -1.7069,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.7240, -1.7240, -1.7240,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.7240, -1.7240, -1.7240,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         ...,\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         [-1.7583, -1.7754, -1.7754,  ..., -2.0323, -2.0152, -2.0152]],\n",
            "\n",
            "        [[ 2.0434,  2.0263,  2.0263,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 2.0605,  2.0605,  2.0605,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 2.0605,  2.0605,  2.0605,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         ...,\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.2728,  1.2728,  1.2728],\n",
            "         [ 1.8722,  1.8722,  1.8722,  ...,  1.2728,  1.2728,  1.2728],\n",
            "         [ 1.8722,  1.8550,  1.8550,  ...,  1.2557,  1.2728,  1.2728]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-0.1999, -0.1657, -0.1657,  ..., -1.3302, -1.2959, -1.2103],\n",
            "         [-0.2171, -0.1828, -0.1657,  ..., -1.2445, -1.2274, -1.1760],\n",
            "         [-0.1828, -0.1657, -0.1486,  ..., -1.2445, -1.2445, -1.2274],\n",
            "         ...,\n",
            "         [ 2.2489,  2.2489,  2.2489,  ..., -1.6555, -1.5870, -1.5014],\n",
            "         [ 2.1975,  2.2489,  2.2489,  ..., -1.6213, -1.5699, -1.5014],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ..., -1.6042, -1.5528, -1.5014]],\n",
            "\n",
            "        [[-0.1657, -0.1314, -0.1314,  ..., -1.3987, -1.3644, -1.2788],\n",
            "         [-0.1828, -0.1486, -0.1314,  ..., -1.3473, -1.3302, -1.2788],\n",
            "         [-0.1486, -0.1486, -0.1143,  ..., -1.3473, -1.3473, -1.3302],\n",
            "         ...,\n",
            "         [ 2.2489,  2.2489,  2.2489,  ..., -1.3473, -1.2274, -1.1418],\n",
            "         [ 2.1975,  2.2489,  2.2489,  ..., -1.3130, -1.2103, -1.1418],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ..., -1.2959, -1.1932, -1.1418]],\n",
            "\n",
            "        [[-0.1314, -0.0972, -0.0972,  ..., -1.3473, -1.3302, -1.2274],\n",
            "         [-0.1486, -0.1143, -0.0972,  ..., -1.2959, -1.2788, -1.2274],\n",
            "         [-0.1143, -0.1143, -0.0801,  ..., -1.2959, -1.2959, -1.2788],\n",
            "         ...,\n",
            "         [ 2.2489,  2.2489,  2.2489,  ..., -0.5596, -0.4568, -0.3712],\n",
            "         [ 2.1975,  2.2489,  2.2489,  ..., -0.5253, -0.4397, -0.3712],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ..., -0.5082, -0.4226, -0.3712]]])\n",
            "torch.Size([1, 6183, 224, 256])\n",
            "tensor([-3.0575, -2.2121,  0.6494,  1.6085, -3.8619,  0.3781, -2.3822,  2.2711,\n",
            "         0.7689,  1.3372, -0.4572, -1.1720,  1.2508, -0.9094, -0.6885,  0.2799,\n",
            "         0.8828, -0.2655, -2.8574,  0.6118, -0.1971, -2.1043,  1.6309, -3.0146])\n",
            "tensor(11.4796)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.2299, 0.1212, 0.1185, 0.0904, 0.0829]),\n",
            "indices=tensor([ 7, 22,  3,  9, 12]))\n",
            "false\n",
            "arham 0.2299148291349411\n",
            "ghazanfar 0.12121430039405823\n",
            "abdul 0.1185232475399971\n",
            "arham 0.09036292880773544\n",
            "arqam 0.08288639038801193\n",
            "video    arqam_s5.mp4\n",
            "Name: 16, dtype: object\n",
            "16 (224, 256, 5610) (1, 1)\n",
            "5610\n",
            "arqam\n",
            "size 5610\n",
            "input_tenor tensor([[[-1.8439, -1.8439, -1.8439,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         [-1.8610, -1.8610, -1.8610,  ..., -1.9809, -1.9638, -1.9638],\n",
            "         [-1.8439, -1.8439, -1.8610,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         ...,\n",
            "         [-1.8439, -1.8439, -1.8610,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         [-1.8439, -1.8439, -1.8610,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.8610, -1.8610, -1.8610,  ..., -1.9124, -1.9124, -1.9124]],\n",
            "\n",
            "        [[-1.7240, -1.7240, -1.7240,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         [-1.7412, -1.7412, -1.7412,  ..., -2.0323, -2.0152, -2.0152],\n",
            "         [-1.7240, -1.7240, -1.7412,  ..., -2.0152, -2.0152, -2.0152],\n",
            "         ...,\n",
            "         [-1.7412, -1.7412, -1.7583,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.7412, -1.7412, -1.7583,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -1.9638, -1.9638, -1.9638]],\n",
            "\n",
            "        [[ 2.1633,  2.1633,  2.1633,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 2.1462,  2.1462,  2.1462,  ...,  1.3070,  1.3242,  1.3242],\n",
            "         [ 2.1633,  2.1633,  2.1462,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         ...,\n",
            "         [ 2.1119,  2.1119,  2.0948,  ...,  1.5297,  1.5297,  1.5297],\n",
            "         [ 2.1119,  2.1119,  2.0948,  ...,  1.5297,  1.5297,  1.5297],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.5297,  1.5297,  1.5297]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-1.9980, -1.9980, -1.9980,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-1.8610, -1.8610, -1.8610,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8610, -1.8610, -1.8610,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2489,  2.2489,  2.2489,  ...,  1.7009,  1.7009,  1.7009],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.7009,  1.7009,  1.7009],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.7009,  1.7009,  1.7009],\n",
            "         ...,\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.7009,  1.7009,  1.7009],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.7009,  1.7009,  1.7009],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.7009,  1.7009,  1.7009]]])\n",
            "torch.Size([1, 5610, 224, 256])\n",
            "tensor([ 0.2125, -2.4415,  0.0695,  0.1436, -0.8920,  3.7316, -1.8509, -0.2304,\n",
            "         0.1336,  2.9350,  3.7026,  1.4846, -2.5299,  4.2291,  2.4049, -7.6545,\n",
            "        -1.6132,  0.7255, -6.7297,  4.8051,  4.7619,  0.3646,  1.0333, -2.5474])\n",
            "tensor(10.8536)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.2794, 0.2676, 0.1571, 0.0955, 0.0928]),\n",
            "indices=tensor([19, 20, 13,  5, 10]))\n",
            "false\n",
            "ghazanfar 0.2794165015220642\n",
            "ghazanfar 0.2675988972187042\n",
            "arqam 0.15707498788833618\n",
            "abdul 0.09550914168357849\n",
            "arham 0.09277565032243729\n",
            "video    arqam_s4.mp4\n",
            "Name: 17, dtype: object\n",
            "17 (224, 256, 5739) (1, 1)\n",
            "5739\n",
            "arqam\n",
            "size 5739\n",
            "input_tenor tensor([[[-1.8439, -1.8439, -1.8439,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9809, -1.9467, -1.9467],\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         ...,\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.8610, -1.8610, -1.8439,  ..., -1.9467, -1.9638, -1.9809]],\n",
            "\n",
            "        [[-1.8610, -1.8610, -1.8610,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         [-1.8268, -1.8268, -1.8268,  ..., -2.0323, -2.0323, -2.0323],\n",
            "         ...,\n",
            "         [-1.7754, -1.7754, -1.7754,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.8097, -1.8097, -1.7925,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.8268, -1.8268, -1.7925,  ..., -1.9809, -1.9980, -2.0152]],\n",
            "\n",
            "        [[ 1.9749,  1.9749,  1.9749,  ...,  1.3070,  1.3070,  1.3070],\n",
            "         [ 1.9407,  1.9407,  1.9407,  ...,  1.3070,  1.3070,  1.3070],\n",
            "         [ 1.9407,  1.9407,  1.9407,  ...,  1.3070,  1.3070,  1.3070],\n",
            "         ...,\n",
            "         [ 1.9920,  1.9920,  1.9920,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 2.0605,  2.0605,  2.0263,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 1.9920,  1.9920,  1.9749,  ...,  1.3242,  1.3070,  1.2899]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.0494, -2.0494, -2.0494,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0665, -2.0665, -2.0665,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2489,  2.2489,  2.2489,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.4783,  1.4783,  1.4783],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.4783,  1.4783,  1.4783],\n",
            "         ...,\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.5468,  1.5468,  1.5468],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.5468,  1.5468,  1.5468],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.5125,  1.5125,  1.5125]]])\n",
            "torch.Size([1, 5739, 224, 256])\n",
            "tensor([-0.5178, -1.7030, -3.6313,  0.9706,  1.0957,  0.7739, -3.3471,  2.3066,\n",
            "         2.2826, -5.3858, -0.8115, -5.0838,  3.0133,  4.5265,  2.1198, -2.6924,\n",
            "         3.0085, -1.8376, -0.8067,  2.2420,  5.3260, -2.1462,  1.9581, -5.2656])\n",
            "tensor(11.1551)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.5229, 0.2351, 0.0518, 0.0515, 0.0255]),\n",
            "indices=tensor([20, 13, 12, 16,  7]))\n",
            "false\n",
            "ghazanfar 0.5228882431983948\n",
            "arqam 0.23507800698280334\n",
            "arqam 0.05176691338419914\n",
            "arqam 0.05151805281639099\n",
            "arham 0.025532834231853485\n",
            "video    ghazanfar_s3.mp4\n",
            "Name: 18, dtype: object\n",
            "18 (224, 256, 5586) (1, 1)\n",
            "5586\n",
            "ghazanfar\n",
            "size 5586\n",
            "input_tenor tensor([[[-1.8782, -1.8782, -1.8782,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -1.9809, -1.9467, -1.9467],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         ...,\n",
            "         [-1.8097, -1.7925, -1.8439,  ..., -1.9467, -1.9638, -1.9809],\n",
            "         [-1.8097, -1.8097, -1.8439,  ..., -1.9467, -1.9638, -1.9809],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9638, -1.9638, -1.9638]],\n",
            "\n",
            "        [[-1.6042, -1.6042, -1.6042,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.6042, -1.6042, -1.6042,  ..., -1.9124, -1.9295, -1.9295],\n",
            "         [-1.5699, -1.5699, -1.5699,  ..., -1.8953, -1.8953, -1.8953],\n",
            "         ...,\n",
            "         [-1.5357, -1.5185, -1.5699,  ..., -1.9295, -1.9467, -1.9638],\n",
            "         [-1.5357, -1.5357, -1.5699,  ..., -1.9295, -1.9467, -1.9638],\n",
            "         [-1.5699, -1.5699, -1.5699,  ..., -1.9467, -1.9467, -1.9467]],\n",
            "\n",
            "        [[ 1.9407,  1.9407,  1.9407,  ...,  1.2385,  1.2385,  1.2385],\n",
            "         [ 1.9407,  1.9407,  1.9407,  ...,  1.2385,  1.2385,  1.2385],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.2557,  1.2557,  1.2557],\n",
            "         ...,\n",
            "         [ 2.0092,  2.0263,  1.9749,  ...,  1.2385,  1.2214,  1.2043],\n",
            "         [ 2.0092,  2.0092,  1.9749,  ...,  1.2385,  1.2214,  1.2043],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.2214,  1.2214,  1.2214]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.7754, -1.7754, -1.7754,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-1.7925, -1.7925, -1.7925,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7925, -1.7925, -1.7925,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7754, -1.7754, -1.7754,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.1975,  2.1975,  2.1975,  ...,  1.4440,  1.4440,  1.4440],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.4440,  1.4440,  1.4440],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.4440,  1.4440,  1.4440],\n",
            "         ...,\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.4098,  1.4098,  1.4098]]])\n",
            "torch.Size([1, 5586, 224, 256])\n",
            "tensor([-0.4072,  0.8888, -2.9530, -2.4661,  0.8638, -1.6877, -2.9760,  1.0423,\n",
            "        -0.1801,  0.7958,  0.1325, -3.6012, -0.4699,  0.0298,  0.5295,  2.4913,\n",
            "        -1.2320, -1.1490, -2.3872, -1.2879, -0.3732,  0.3535,  1.2648, -1.1253])\n",
            "tensor(11.6031)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.3423, 0.1004, 0.0804, 0.0689, 0.0672]),\n",
            "indices=tensor([15, 22,  7,  1,  4]))\n",
            "false\n",
            "arqam 0.34226661920547485\n",
            "ghazanfar 0.10039544105529785\n",
            "arham 0.08036331832408905\n",
            "abdul 0.06893093138933182\n",
            "abdul 0.06722845137119293\n",
            "video    ghazanfar_s2.mp4\n",
            "Name: 19, dtype: object\n",
            "19 (224, 256, 2799) (1, 1)\n",
            "2799\n",
            "ghazanfar\n",
            "size 2799\n",
            "input_tenor tensor([[[-1.8268, -1.8439, -1.8439,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         ...,\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         [-1.8782, -1.8782, -1.8782,  ..., -1.9638, -1.9467, -1.9809],\n",
            "         [-1.8782, -1.8953, -1.8953,  ..., -1.9638, -1.9638, -1.9638]],\n",
            "\n",
            "        [[-1.5185, -1.5357, -1.5357,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.5357, -1.5357, -1.5357,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.5357, -1.5357, -1.5357,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         ...,\n",
            "         [-1.7069, -1.7069, -1.7069,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.7069, -1.7069, -1.7069,  ..., -1.9980, -1.9809, -2.0152],\n",
            "         [-1.7069, -1.7240, -1.7240,  ..., -1.9980, -1.9980, -1.9980]],\n",
            "\n",
            "        [[ 2.0605,  2.0434,  2.0434,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         [ 2.0434,  2.0434,  2.0434,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         [ 2.0434,  2.0434,  2.0434,  ...,  1.3584,  1.3584,  1.3584],\n",
            "         ...,\n",
            "         [ 1.7009,  1.7009,  1.7009,  ...,  0.9817,  0.9817,  0.9817],\n",
            "         [ 1.7009,  1.7009,  1.7009,  ...,  0.9817,  0.9988,  0.9646],\n",
            "         [ 1.7009,  1.6838,  1.6838,  ...,  0.9817,  0.9817,  0.9817]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.7583, -1.7583, -1.7583,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2318,  2.2318,  2.2318,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.2318,  2.2318,  2.2318,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         ...,\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.0844,  1.0844,  1.0844],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.0844,  1.0844,  1.0844],\n",
            "         [ 1.9749,  1.9749,  1.9749,  ...,  1.0844,  1.0844,  1.0844]]])\n",
            "torch.Size([1, 2799, 224, 256])\n",
            "tensor([-1.5501,  1.9401,  1.3544, -0.3331, -1.0473,  1.4130, -0.2393,  0.5858,\n",
            "         0.7384,  0.1760,  0.0374, -0.1895,  2.3783,  0.6579,  0.2558,  1.2639,\n",
            "        -0.4790, -0.2734, -0.7624, -2.1831, -0.4292, -0.0994,  1.5054, -0.4426])\n",
            "tensor(10.9028)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.2150, 0.1387, 0.0898, 0.0819, 0.0772]),\n",
            "indices=tensor([12,  1, 22,  5,  2]))\n",
            "false\n",
            "arqam 0.21500304341316223\n",
            "abdul 0.13872230052947998\n",
            "ghazanfar 0.08981640636920929\n",
            "abdul 0.08188824355602264\n",
            "abdul 0.07722960412502289\n",
            "video    ghazanfar_s1.mp4\n",
            "Name: 20, dtype: object\n",
            "20 (224, 256, 5664) (1, 1)\n",
            "5664\n",
            "ghazanfar\n",
            "size 5664\n",
            "input_tenor tensor([[[-1.8097, -1.8782, -1.8268,  ..., -1.9638, -1.9809, -1.9809],\n",
            "         [-1.8268, -1.8782, -1.8268,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         [-1.8097, -1.8097, -1.8782,  ..., -1.9809, -1.9809, -1.9809],\n",
            "         ...,\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9638, -1.9638, -1.9638],\n",
            "         [-1.8439, -1.8268, -1.8268,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.8439, -1.8439, -1.8439,  ..., -1.9124, -1.9124, -1.9124]],\n",
            "\n",
            "        [[-1.5357, -1.6042, -1.5528,  ..., -1.8953, -1.9124, -1.9124],\n",
            "         [-1.5528, -1.6042, -1.5528,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.5357, -1.5357, -1.6042,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         ...,\n",
            "         [-1.5357, -1.5357, -1.5357,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.5357, -1.5357, -1.5185,  ..., -1.9295, -1.9295, -1.9295],\n",
            "         [-1.5185, -1.5185, -1.5185,  ..., -1.9295, -1.9295, -1.9295]],\n",
            "\n",
            "        [[ 2.0092,  1.9578,  1.9920,  ...,  1.3413,  1.3242,  1.3242],\n",
            "         [ 1.9920,  1.9407,  1.9920,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         [ 2.0092,  2.0092,  1.9407,  ...,  1.3242,  1.3242,  1.3242],\n",
            "         ...,\n",
            "         [ 2.0434,  2.0434,  2.0434,  ...,  1.1872,  1.1872,  1.1872],\n",
            "         [ 2.0434,  2.0605,  2.0605,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 2.0092,  2.0092,  2.0092,  ...,  1.2043,  1.2043,  1.2043]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.0152, -2.0152, -2.0152,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0152, -2.0152, -2.0152,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0152, -2.0152, -2.0152,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.6727, -1.6727, -1.6727,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-1.6727, -1.6727, -1.6727,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-1.6727, -1.6727, -1.6727,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         ...,\n",
            "         [-1.8097, -1.8097, -1.8097,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8097, -1.8097, -1.8097,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-1.8268, -1.8268, -1.8097,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.2489,  2.2489,  2.2489,  ...,  1.6838,  1.6838,  1.6838],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.6667,  1.6667,  1.6667],\n",
            "         [ 2.2489,  2.2489,  2.2489,  ...,  1.6667,  1.6667,  1.6667],\n",
            "         ...,\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.4098,  1.4098,  1.4098],\n",
            "         [ 2.1804,  2.1804,  2.1804,  ...,  1.3584,  1.3584,  1.3584]]])\n",
            "torch.Size([1, 5664, 224, 256])\n",
            "tensor([ 3.9465, -7.0692,  0.3593, -0.5541, -2.2155, -0.6202, -1.6009, -0.8114,\n",
            "        -0.6664, -1.2408, -3.1577, -2.4506, -1.7359,  0.0489,  1.2361, -0.4969,\n",
            "         3.0423,  8.1706, -2.0551,  6.7995,  0.5529, -1.5605,  1.5180,  3.9299])\n",
            "tensor(11.1885)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.7732, 0.1963, 0.0113, 0.0111, 0.0046]),\n",
            "indices=tensor([17, 19,  0, 23, 16]))\n",
            "true\n",
            "ghazanfar 0.7731673121452332\n",
            "ghazanfar 0.196268230676651\n",
            "abdul 0.011318463832139969\n",
            " 0.01113196276128292\n",
            "arqam 0.004582380410283804\n",
            "video    ghazanfar_s6.mp4\n",
            "Name: 21, dtype: object\n",
            "21 (224, 256, 5436) (1, 1)\n",
            "5436\n",
            "ghazanfar\n",
            "size 5436\n",
            "input_tenor tensor([[[-2.1008, -2.1179, -2.1179,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-2.0494, -2.0665, -2.0665,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         [-2.0494, -2.0494, -2.0494,  ..., -2.0494, -2.0494, -2.0494],\n",
            "         ...,\n",
            "         [-1.9295, -1.9295, -1.9295,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.9295, -1.9295, -1.9295,  ..., -1.9124, -1.9124, -1.9124],\n",
            "         [-1.9295, -1.9295, -1.9295,  ..., -1.9124, -1.9124, -1.9124]],\n",
            "\n",
            "        [[ 1.4612,  1.4440,  1.4440,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         [ 1.4612,  1.4612,  1.4612,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         [ 1.4612,  1.4612,  1.4612,  ...,  1.4612,  1.4612,  1.4612],\n",
            "         ...,\n",
            "         [ 1.9920,  1.9920,  1.9920,  ...,  1.9578,  1.9578,  1.9578],\n",
            "         [ 1.9920,  1.9920,  1.9920,  ...,  1.9578,  1.9578,  1.9578],\n",
            "         [ 1.9920,  1.9920,  1.9920,  ...,  1.9578,  1.9578,  1.9578]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 1.7694,  1.7694,  1.7694,  ...,  1.6153,  1.6153,  1.6153],\n",
            "         [ 1.7523,  1.7694,  1.7694,  ...,  1.5810,  1.5810,  1.5810],\n",
            "         [ 1.7523,  1.7694,  1.7694,  ...,  1.6153,  1.6153,  1.6153],\n",
            "         ...,\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  2.1975,  2.1975,  2.1975],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  2.1975,  2.1975,  2.1975],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  2.1975,  2.1975,  2.1975]]])\n",
            "torch.Size([1, 5436, 224, 256])\n",
            "tensor([ 5.5091e+00,  4.4822e+00,  2.9924e+00, -4.7767e+00, -4.4799e+00,\n",
            "         2.2476e+00, -1.9218e+00,  9.8086e-01,  2.4554e+00, -2.1430e+00,\n",
            "        -1.0890e+00,  7.2488e+00,  1.2898e-03, -3.1087e+00, -5.5615e-01,\n",
            "        -2.2326e+00, -2.3058e+00, -1.7460e+00,  1.0950e+00,  5.9552e+00,\n",
            "         1.1274e-01,  2.9504e+00, -2.2505e+00, -5.5006e-01])\n",
            "tensor(11.4624)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.6399, 0.1755, 0.1124, 0.0402, 0.0091]),\n",
            "indices=tensor([11, 19,  0,  1,  2]))\n",
            "false\n",
            "arham 0.6399266123771667\n",
            "ghazanfar 0.17552317678928375\n",
            "abdul 0.11235442757606506\n",
            "abdul 0.0402364544570446\n",
            "abdul 0.009069620631635189\n",
            "video    ghazanfar_s5.mp4\n",
            "Name: 22, dtype: object\n",
            "22 (224, 256, 5055) (1, 1)\n",
            "5055\n",
            "ghazanfar\n",
            "size 5055\n",
            "input_tenor tensor([[[-1.9295, -1.9467, -1.9295,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-1.9638, -1.9467, -1.9638,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         [-1.9638, -1.9295, -1.9809,  ..., -2.0665, -2.0665, -2.0665],\n",
            "         ...,\n",
            "         [-1.9467, -1.9638, -1.9638,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-1.9467, -1.9638, -1.9809,  ..., -2.0837, -2.0837, -2.1008],\n",
            "         [-1.9638, -1.9638, -1.9638,  ..., -2.0494, -2.0665, -2.0494]],\n",
            "\n",
            "        [[-1.0904, -1.0904, -1.0904,  ..., -1.6213, -1.6213, -1.6213],\n",
            "         [-1.1075, -1.0904, -1.1075,  ..., -1.6213, -1.6213, -1.6213],\n",
            "         [-1.1075, -1.0733, -1.1247,  ..., -1.6213, -1.6213, -1.6213],\n",
            "         ...,\n",
            "         [-1.0904, -1.1075, -1.1075,  ..., -1.6384, -1.6384, -1.6384],\n",
            "         [-1.0904, -1.1075, -1.1247,  ..., -1.6384, -1.6384, -1.6555],\n",
            "         [-1.1075, -1.1075, -1.1075,  ..., -1.6555, -1.6727, -1.6555]],\n",
            "\n",
            "        [[ 2.0263,  2.0092,  2.0263,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 1.9920,  2.0092,  1.9920,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 1.9920,  2.0263,  1.9749,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         ...,\n",
            "         [ 2.0092,  1.9920,  1.9920,  ...,  1.2043,  1.2043,  1.2043],\n",
            "         [ 2.0092,  1.9920,  1.9749,  ...,  1.2043,  1.2043,  1.1872],\n",
            "         [ 1.9920,  1.9920,  1.9920,  ...,  1.2043,  1.1872,  1.2043]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.4158, -1.4158, -1.4158,  ..., -1.9295, -1.9467, -1.9467],\n",
            "         [-1.3987, -1.3987, -1.3987,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         [-1.3987, -1.3987, -1.3987,  ..., -1.9467, -1.9467, -1.9467],\n",
            "         ...,\n",
            "         [-1.4672, -1.4672, -1.4672,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.4672, -1.4672, -1.4672,  ..., -1.9980, -1.9980, -1.9980],\n",
            "         [-1.4843, -1.4843, -1.4843,  ..., -2.0152, -2.0152, -2.0152]],\n",
            "\n",
            "        [[ 2.1804,  2.1804,  2.1804,  ...,  1.3584,  1.3413,  1.3413],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.3413,  1.3413,  1.3413],\n",
            "         [ 2.1975,  2.1975,  2.1975,  ...,  1.3413,  1.3413,  1.3413],\n",
            "         ...,\n",
            "         [ 2.1119,  2.1119,  2.1119,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 2.1119,  2.1119,  2.1119,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.2043,  1.2043,  1.2043]]])\n",
            "torch.Size([1, 5055, 224, 256])\n",
            "tensor([-5.8667, -5.2527, -3.7217,  2.9138, -3.5023, -0.7574,  5.1984, -1.3470,\n",
            "         1.3566, -0.8024,  0.6925,  0.9420, -1.6506, -5.5056,  1.8662,  0.2674,\n",
            "         5.1871, -3.6287, -0.5721, -1.5749,  1.8344, -3.3285,  7.3855,  3.8317])\n",
            "tensor(11.2697)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.7821, 0.0878, 0.0868, 0.0224, 0.0089]),\n",
            "indices=tensor([22,  6, 16, 23,  3]))\n",
            "true\n",
            "ghazanfar 0.782102108001709\n",
            "arham 0.08778416365385056\n",
            "arqam 0.08679322153329849\n",
            " 0.02238030545413494\n",
            "abdul 0.00893695093691349\n",
            "video    ghazanfar_s4.mp4\n",
            "Name: 23, dtype: object\n",
            "23 (224, 256, 5766) (1, 1)\n",
            "5766\n",
            "ghazanfar\n",
            "size 5766\n",
            "input_tenor tensor([[[-2.1008, -2.1008, -2.0837,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.0837, -2.0837, -2.1008],\n",
            "         [-2.1008, -2.1008, -2.1008,  ..., -2.0837, -2.0837, -2.1008],\n",
            "         ...,\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.0837, -2.0837, -2.0837,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-1.9638, -1.9638, -1.9467,  ..., -2.0837, -2.0837, -2.0837],\n",
            "         [-1.9638, -1.9638, -1.9638,  ..., -2.0665, -2.0665, -2.0837],\n",
            "         [-1.9638, -1.9638, -1.9638,  ..., -2.0665, -2.0665, -2.0837],\n",
            "         ...,\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -2.1008, -2.1179, -2.1179],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -2.1008, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 1.8893,  1.8893,  1.9064,  ...,  1.0844,  1.0844,  1.0844],\n",
            "         [ 1.8893,  1.8893,  1.8893,  ...,  1.1015,  1.1015,  1.0844],\n",
            "         [ 1.8893,  1.8893,  1.8893,  ...,  1.1015,  1.0844,  1.0844],\n",
            "         ...,\n",
            "         [ 2.0434,  2.0434,  2.0434,  ...,  1.2214,  1.2214,  1.2214],\n",
            "         [ 2.0434,  2.0434,  2.0434,  ...,  1.2214,  1.2043,  1.2043],\n",
            "         [ 2.0434,  2.0434,  2.0434,  ...,  1.2214,  1.2043,  1.2043]],\n",
            "\n",
            "        ...,\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1008, -2.1008, -2.1008],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         ...,\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179],\n",
            "         [-2.1179, -2.1179, -2.1179,  ..., -2.1179, -2.1179, -2.1179]],\n",
            "\n",
            "        [[ 2.0948,  2.0948,  2.0948,  ...,  1.1700,  1.1700,  1.1700],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.1872,  1.1872,  1.1872],\n",
            "         [ 2.0948,  2.0948,  2.0948,  ...,  1.1872,  1.1872,  1.1872],\n",
            "         ...,\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.2899,  1.2899,  1.2899],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.2899,  1.2899,  1.2899],\n",
            "         [ 2.2147,  2.2147,  2.2147,  ...,  1.3242,  1.3242,  1.3242]]])\n",
            "torch.Size([1, 5766, 224, 256])\n",
            "tensor([ 0.1332, -3.8164, -4.1885, -3.4989,  2.9533,  0.2419,  2.2494,  1.0950,\n",
            "         1.0780, -2.5038,  1.8691,  1.2213, -2.3121,  0.9729,  0.4623, -0.2014,\n",
            "         1.0289, -1.3644,  0.6732,  1.4260, -3.1691, -2.2677, -0.1587,  1.2558])\n",
            "tensor(11.3110)\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.2911, 0.1440, 0.0984, 0.0632, 0.0533]),\n",
            "indices=tensor([ 4,  6, 10, 19, 23]))\n",
            "false\n",
            "abdul 0.29107677936553955\n",
            "arham 0.14398832619190216\n",
            "arham 0.09843788295984268\n",
            "ghazanfar 0.0632033571600914\n",
            " 0.053307779133319855\n",
            "[0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0]\n",
            "accuracy:  62.5\n"
          ]
        }
      ],
      "source": [
        "# Evaluate the model on every sample in ppg_dataset and report top-1 accuracy.\n",
        "# Relies on globals from earlier cells: ppg_dataset, tens, model, criterion,\n",
        "# int_labels, categories, trues, outputs, accuracy.\n",
        "for i in range(len(ppg_dataset)):\n",
        "    sample = ppg_dataset[i]\n",
        "    print(i, sample['image'].shape, sample['labels'].shape)\n",
        "\n",
        "    # Channel count of the stacked-frame image (H, W, C); varies per video.\n",
        "    print(sample['image'].shape[2])\n",
        "    # Ground-truth identity is the filename prefix, e.g. 'abdul' from 'abdul_s1.mp4'.\n",
        "    current_name = sample['labels'][0][0].split('_')[0]\n",
        "    print(current_name)\n",
        "    input_tensor = tens(sample['image'])\n",
        "\n",
        "    # Create a mini-batch of size 1 as expected by the model.\n",
        "    input_batch = input_tensor.unsqueeze(0)\n",
        "\n",
        "    # Move the input and model to GPU for speed if available.\n",
        "    if torch.cuda.is_available():\n",
        "        input_batch = input_batch.to('cuda')\n",
        "        model.to('cuda')\n",
        "    print(input_batch.shape)\n",
        "    with torch.no_grad():\n",
        "        output = model(input_batch)\n",
        "\n",
        "    print(output[0])\n",
        "    labels = torch.tensor(int_labels)\n",
        "    loss = criterion(output[0], labels)\n",
        "    print(loss)\n",
        "\n",
        "    # The output has unnormalized scores; softmax turns them into probabilities.\n",
        "    probabilities = torch.nn.functional.softmax(output[0], dim=0)\n",
        "    print('probabilities', probabilities.size(0))\n",
        "\n",
        "    top5_prob, top5_catid = torch.topk(probabilities, 5)\n",
        "    print(torch.topk(probabilities, 5))\n",
        "    # Count a hit when the top-1 prediction matches the filename label.\n",
        "    if categories[top5_catid[0]] == current_name:\n",
        "        trues.append(1)\n",
        "        print('true')\n",
        "    else:\n",
        "        trues.append(0)\n",
        "        print('false')\n",
        "    outputs.append(top5_prob[0].item())\n",
        "    # 'rank' (not 'i') so the inner index does not shadow the dataset index.\n",
        "    for rank in range(top5_prob.size(0)):\n",
        "        print(categories[top5_catid[rank]], top5_prob[rank].item())\n",
        "\n",
        "print('accuracy: ', accuracy(outputs, trues))\n",
        "# NOTE(review): pickling a model ties it to this exact module layout; consider\n",
        "# torch.save(model.state_dict(), ...) for a more portable checkpoint.\n",
        "with open('model_pkl', 'wb') as f:\n",
        "    pickle.dump(model, f)"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "kkw5EIdeGszR",
      "metadata": {
        "id": "kkw5EIdeGszR"
      },
      "source": [
        "Flask\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "jkbzZHY5Gv3Z",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "jkbzZHY5Gv3Z",
        "outputId": "320467d2-8fba-444a-b287-eb3b944fe1fe"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Requirement already satisfied: flask in /usr/local/lib/python3.8/dist-packages (1.1.4)\n",
            "Requirement already satisfied: Werkzeug<2.0,>=0.15 in /usr/local/lib/python3.8/dist-packages (from flask) (1.0.1)\n",
            "Requirement already satisfied: Jinja2<3.0,>=2.10.1 in /usr/local/lib/python3.8/dist-packages (from flask) (2.11.3)\n",
            "Requirement already satisfied: click<8.0,>=5.1 in /usr/local/lib/python3.8/dist-packages (from flask) (7.1.2)\n",
            "Requirement already satisfied: itsdangerous<2.0,>=0.24 in /usr/local/lib/python3.8/dist-packages (from flask) (1.1.0)\n",
            "Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.8/dist-packages (from Jinja2<3.0,>=2.10.1->flask) (2.0.1)\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting flask-ngrok\n",
            "  Downloading flask_ngrok-0.0.25-py3-none-any.whl (3.1 kB)\n",
            "Requirement already satisfied: requests in /usr/local/lib/python3.8/dist-packages (from flask-ngrok) (2.25.1)\n",
            "Requirement already satisfied: Flask>=0.8 in /usr/local/lib/python3.8/dist-packages (from flask-ngrok) (1.1.4)\n",
            "Requirement already satisfied: itsdangerous<2.0,>=0.24 in /usr/local/lib/python3.8/dist-packages (from Flask>=0.8->flask-ngrok) (1.1.0)\n",
            "Requirement already satisfied: click<8.0,>=5.1 in /usr/local/lib/python3.8/dist-packages (from Flask>=0.8->flask-ngrok) (7.1.2)\n",
            "Requirement already satisfied: Werkzeug<2.0,>=0.15 in /usr/local/lib/python3.8/dist-packages (from Flask>=0.8->flask-ngrok) (1.0.1)\n",
            "Requirement already satisfied: Jinja2<3.0,>=2.10.1 in /usr/local/lib/python3.8/dist-packages (from Flask>=0.8->flask-ngrok) (2.11.3)\n",
            "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.8/dist-packages (from requests->flask-ngrok) (1.24.3)\n",
            "Requirement already satisfied: chardet<5,>=3.0.2 in /usr/local/lib/python3.8/dist-packages (from requests->flask-ngrok) (4.0.0)\n",
            "Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.8/dist-packages (from requests->flask-ngrok) (2.10)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.8/dist-packages (from requests->flask-ngrok) (2022.12.7)\n",
            "Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.8/dist-packages (from Jinja2<3.0,>=2.10.1->Flask>=0.8->flask-ngrok) (2.0.1)\n",
            "Installing collected packages: flask-ngrok\n",
            "Successfully installed flask-ngrok-0.0.25\n",
            "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n",
            "Collecting pyngrok==4.1.1\n",
            "  Downloading pyngrok-4.1.1.tar.gz (18 kB)\n",
            "  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "Requirement already satisfied: future in /usr/local/lib/python3.8/dist-packages (from pyngrok==4.1.1) (0.16.0)\n",
            "Requirement already satisfied: PyYAML in /usr/local/lib/python3.8/dist-packages (from pyngrok==4.1.1) (6.0)\n",
            "Building wheels for collected packages: pyngrok\n",
            "  Building wheel for pyngrok (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Created wheel for pyngrok: filename=pyngrok-4.1.1-py3-none-any.whl size=15982 sha256=844ff27f4ffa2c32ebbb760fe57514bee004326ba12284105324f91141e6c56b\n",
            "  Stored in directory: /root/.cache/pip/wheels/5e/0a/51/8cb053ccd84481dd3233eba4cdb608bc7a885fd8ca418c0806\n",
            "Successfully built pyngrok\n",
            "Installing collected packages: pyngrok\n",
            "Successfully installed pyngrok-4.1.1\n"
          ]
        }
      ],
      "source": [
        "# %pip (not !pip) installs into the kernel's own environment, so the\n",
        "# packages are importable without restarting the runtime.\n",
        "%pip install flask\n",
        "%pip install flask-ngrok\n",
        "%pip install pyngrok==4.1.1"
      ]
    },
    {
      "attachments": {},
      "cell_type": "markdown",
      "id": "c87a0b9b",
      "metadata": {},
      "source": [
        "Paste your ngrok authtoken into the cell below, replacing the `'here'` placeholder (get a token from the ngrok dashboard)."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "id": "TlDKhXGgSeS1",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "TlDKhXGgSeS1",
        "outputId": "65b7b5c6-4cae-4861-cff2-b68b73f3ba9e"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Authtoken saved to configuration file: /root/.ngrok2/ngrok.yml\n"
          ]
        }
      ],
      "source": [
        "# Replace 'here' with your personal ngrok authtoken (from dashboard.ngrok.com);\n",
        "# never commit a real token to version control.\n",
        "!ngrok authtoken 'here'"
      ]
    },
    {
      "cell_type": "markdown",
      "id": "xBGCz5YiPdLe",
      "metadata": {
        "id": "xBGCz5YiPdLe"
      },
      "source": [
        "**Flask Application**\n",
        "\n",
        "\n",
        "\n",
        "We deploy the model behind a Flask server: it receives a video from the Flutter app, runs the model on the received video, and sends the prediction back to Flutter.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 24,
      "id": "rssiFYVHG1WT",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "rssiFYVHG1WT",
        "outputId": "4e9b658c-b159-4122-8692-9e9374d1c91d"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            " * Serving Flask app \"__main__\" (lazy loading)\n",
            " * Environment: production\n",
            "\u001b[31m   WARNING: This is a development server. Do not use it in a production deployment.\u001b[0m\n",
            "\u001b[2m   Use a production WSGI server instead.\u001b[0m\n",
            " * Debug mode: off\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "INFO:werkzeug: * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            " * Running on http://134a-35-196-97-8.ngrok.io\n",
            " * Traffic stats available on http://127.0.0.1:4040\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "INFO:werkzeug:127.0.0.1 - - [08/Jan/2023 16:35:16] \"\u001b[37mPOST /upload HTTP/1.1\u001b[0m\" 200 -\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "(224, 256, 3)\n",
            "size 3\n",
            "input_tenor tensor([[[-1.9124, -1.9124, -1.9124,  ..., -1.9124, -1.8953, -1.8782],\n",
            "         [-1.9124, -1.9124, -1.9124,  ..., -1.8782, -1.8439, -1.8953],\n",
            "         [-1.8782, -1.9124, -1.9124,  ..., -1.8782, -1.8782, -1.8782],\n",
            "         ...,\n",
            "         [-1.7754, -1.7754, -1.7925,  ..., -1.7240, -1.7583, -1.7925],\n",
            "         [-1.8268, -1.8439, -1.8097,  ..., -1.7754, -1.7583, -1.7412],\n",
            "         [-1.7754, -1.7754, -1.7754,  ..., -1.7583, -1.7754, -1.7583]],\n",
            "\n",
            "        [[-1.8782, -1.8953, -1.8953,  ..., -1.8782, -1.8953, -1.8782],\n",
            "         [-1.8782, -1.8953, -1.8953,  ..., -1.8782, -1.8953, -1.8953],\n",
            "         [-1.8953, -1.8953, -1.8953,  ..., -1.8953, -1.8782, -1.8782],\n",
            "         ...,\n",
            "         [-1.6727, -1.6727, -1.6898,  ..., -1.6727, -1.6555, -1.6727],\n",
            "         [-1.6898, -1.6898, -1.6727,  ..., -1.6727, -1.6555, -1.6898],\n",
            "         [-1.6727, -1.6727, -1.6727,  ..., -1.6555, -1.6727, -1.6555]],\n",
            "\n",
            "        [[ 1.2557,  1.2899,  1.2899,  ...,  1.3755,  1.3584,  1.3755],\n",
            "         [ 1.2557,  1.2899,  1.2899,  ...,  1.3755,  1.3755,  1.3584],\n",
            "         [ 1.3070,  1.2899,  1.2899,  ...,  1.3755,  1.3755,  1.3755],\n",
            "         ...,\n",
            "         [ 1.9064,  1.9064,  1.8893,  ...,  2.1975,  2.1975,  2.1804],\n",
            "         [ 1.8893,  1.8893,  1.9064,  ...,  2.1804,  2.1975,  2.1804],\n",
            "         [ 1.9064,  1.9064,  1.9064,  ...,  2.1975,  2.1804,  2.1975]]])\n",
            "torch.Size([1, 3, 224, 256])\n",
            "probabilities 24\n",
            "torch.return_types.topk(\n",
            "values=tensor([0.9769, 0.0037, 0.0031, 0.0027, 0.0022]),\n",
            "indices=tensor([21, 12,  4,  1, 14]))\n",
            "false\n",
            "ghazanfar 0.9768533706665039\n"
          ]
        },
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "INFO:werkzeug:127.0.0.1 - - [08/Jan/2023 16:35:20] \"\u001b[37mGET /result HTTP/1.1\u001b[0m\" 200 -\n"
          ]
        },
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "ghazanfar\n"
          ]
        }
      ],
      "source": [
        "import numpy as np\n",
        "from flask_ngrok import run_with_ngrok\n",
        "from flask import Flask, request, jsonify, render_template, redirect, url_for\n",
        "import pickle\n",
        "from werkzeug.utils import secure_filename\n",
        "import urllib.request\n",
        "from pathlib import Path\n",
        "import tempfile\n",
        "\n",
        "app = Flask(__name__, template_folder='templates')\n",
        "\n",
        "# Expose the local dev server through an ngrok tunnel (Colab has no public IP).\n",
        "run_with_ngrok(app)\n",
        "\n",
        "# Model pickled by the evaluation cell above.\n",
        "# NOTE(review): unpickling executes arbitrary code -- only load trusted files.\n",
        "with open('model_pkl', 'rb') as f:\n",
        "    model1 = pickle.load(f)\n",
        "\n",
        "# Prediction from the most recent /upload, served back by /result.\n",
        "res = None\n",
        "\n",
        "\n",
        "@app.route('/')\n",
        "def upload_form():\n",
        "    return render_template('index.html')\n",
        "\n",
        "\n",
        "@app.route('/upload', methods=['POST'])\n",
        "def upload_video():\n",
        "    \"\"\"Receive a video, classify the speaker and cache the prediction in res.\"\"\"\n",
        "    global res\n",
        "\n",
        "    if 'file' not in request.files:\n",
        "        return redirect(request.url)\n",
        "\n",
        "    upload_file = request.files['file']\n",
        "    if upload_file.filename == '':\n",
        "        return redirect(request.url)\n",
        "\n",
        "    filename = secure_filename(upload_file.filename)\n",
        "    # Ground-truth identity is the filename prefix, e.g. 'abdul' from 'abdul_s1.mp4'.\n",
        "    current_name = filename.split('_')[0]\n",
        "\n",
        "    with tempfile.TemporaryDirectory() as td:\n",
        "        temp_filename = Path(td) / 'uploaded_video'\n",
        "        upload_file.save(temp_filename)\n",
        "\n",
        "        # Read all frames, resize each to the model input size and stack them\n",
        "        # along the channel axis -> (224, 256, 3 * n_frames). Collecting the\n",
        "        # frames first and concatenating once avoids the quadratic cost of\n",
        "        # np.concatenate inside the loop.\n",
        "        cap = cv2.VideoCapture(str(temp_filename))\n",
        "        frames = []\n",
        "        while cap.isOpened():\n",
        "            ret, frame = cap.read()\n",
        "            if not ret:\n",
        "                break\n",
        "            frames.append(cv2.resize(frame, (256, 224)))\n",
        "        cap.release()\n",
        "        data = np.concatenate(frames, axis=2)\n",
        "\n",
        "    print(data.shape)\n",
        "    input_tensor = tens(data)\n",
        "    # Mini-batch of size 1 as expected by the model.\n",
        "    input_batch = input_tensor.unsqueeze(0)\n",
        "    # Use the freshly loaded model1 (the original referenced a global 'model'\n",
        "    # leaked from an earlier cell, breaking a Restart-and-Run of this cell alone).\n",
        "    if torch.cuda.is_available():\n",
        "        input_batch = input_batch.to('cuda')\n",
        "        model1.to('cuda')\n",
        "    print(input_batch.shape)\n",
        "    with torch.no_grad():\n",
        "        output = model1(input_batch)\n",
        "\n",
        "    # Unnormalized scores -> probabilities.\n",
        "    probabilities = torch.nn.functional.softmax(output[0], dim=0)\n",
        "    print('probabilities', probabilities.size(0))\n",
        "\n",
        "    top5_prob, top5_catid = torch.topk(probabilities, 5)\n",
        "    print(torch.topk(probabilities, 5))\n",
        "    if categories[top5_catid[0]] == current_name:\n",
        "        trues.append(1)\n",
        "        print('true')\n",
        "    else:\n",
        "        print('false')\n",
        "    outputs.append(top5_prob[0].item())\n",
        "\n",
        "    res = categories[top5_catid[0]]\n",
        "    print(res, top5_prob[0].item())\n",
        "\n",
        "    return render_template('index.html', filename=filename)\n",
        "\n",
        "\n",
        "@app.route('/result', methods=['GET'])\n",
        "def Index():\n",
        "    \"\"\"Return the prediction of the most recent upload (None before any upload).\"\"\"\n",
        "    print(res)\n",
        "    return jsonify({'result': res})\n",
        "\n",
        "\n",
        "@app.route('/display/<filename>')\n",
        "def display_video(filename):\n",
        "    # url_for is now imported above; the original omitted it (NameError at runtime).\n",
        "    return redirect(url_for('static', filename='uploads/' + filename), code=301)\n",
        "\n",
        "\n",
        "if __name__ == \"__main__\":\n",
        "    app.run()"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3 (ipykernel)",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.9.12"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 5
}
