{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "\n",
    "class MeanVarianceLoss(nn.Module):\n",
    "    \"\"\"Mean-variance loss over a discrete label distribution.\n",
    "\n",
    "    `forward` takes per-sample probability vectors over the labels\n",
    "    start_age..end_age and returns (mean_loss, variance_loss), each\n",
    "    averaged over the samples selected by `mask`.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.lambda_1 = 0.1   # mean-loss weight (applied by the caller)\n",
    "        self.lambda_2 = 0.01  # variance-loss weight (applied by the caller)\n",
    "        self.start_age = 0\n",
    "        self.end_age = 6\n",
    "\n",
    "    def forward(self, input, target, mask):\n",
    "        # input: (N, C) probabilities; target: (N, 1); mask: (N, 1) 0/1 selector.\n",
    "        p = input\n",
    "        a = torch.arange(self.start_age, self.end_age + 1, dtype=torch.float32)\n",
    "        mean = (p * a).sum(1, keepdim=True)  # (N, 1) expected label\n",
    "        len_mask = mask.sum()\n",
    "        # Masked L1 distance between the expected label and the target.\n",
    "        mean_loss = (torch.abs(mean - target) * mask).sum() / len_mask\n",
    "        # (a - mean) broadcasts (C,) against (N, 1) -> (N, C); same values as\n",
    "        # the original a[None, :] - mean[:, None] followed by squeeze(1),\n",
    "        # without the extra singleton dimension.\n",
    "        b = (a - mean) ** 2\n",
    "        var = (p * b).sum(1, keepdim=True)  # (N, 1) distribution variance\n",
    "        variance_loss = (var * mask).sum() / len_mask\n",
    "        return mean_loss, variance_loss\n",
    "\n",
    "\n",
    "mv = MeanVarianceLoss()\n",
    "input = torch.tensor([[0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.0],\n",
    "                      [0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0]])\n",
    "target = torch.tensor([[3.0], [0.0]])\n",
    "mask = torch.tensor([[1], [0]])\n",
    "mean_loss, var_loss = mv(input, target, mask)\n",
    "print(mean_loss, var_loss)  # expect tensor(0.1000) tensor(0.0900)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Verify KLDivLoss behaviour\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "\n",
    "# `size_average`/`reduce` are deprecated; `reduction` is the modern API:\n",
    "#   (size_average=False, reduce=False) -> reduction='none'\n",
    "#   (size_average=True,  reduce=True)  -> reduction='mean'\n",
    "loss_f = nn.KLDivLoss(reduction='none')\n",
    "loss_f_mean = nn.KLDivLoss(reduction='mean')\n",
    "\n",
    "# Network output (probabilities) and the target distribution.\n",
    "output = torch.from_numpy(np.array([[0.1, 0.9, 0.0]])).float()\n",
    "# NOTE(review): these targets look like log-probabilities (log 0.1 = -2.3026),\n",
    "# but KLDivLoss without log_target=True treats targets as probabilities -\n",
    "# confirm this is intentional.\n",
    "target = torch.from_numpy(np.array([[-2.3026, -0.1054, -16.1181]])).float()\n",
    "\n",
    "# KLDivLoss expects the *input* to be log-probabilities.\n",
    "output = torch.log(output + 1e-7)\n",
    "print(output)\n",
    "loss_1 = loss_f(output, target)\n",
    "loss_mean = loss_f_mean(output, target)\n",
    "\n",
    "print('\\nloss: ', loss_1)\n",
    "print(loss_mean)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Verify weighted (binary) cross-entropy against two candidate targets\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "\n",
    "# Network output (probabilities) and the two one-hot label candidates.\n",
    "output = torch.from_numpy(np.array([[0.0, 1.0, 0.0]])).float()\n",
    "labels_high = torch.from_numpy(np.array([[0.0, 1.0, 0.0]])).float()\n",
    "labels_low = torch.from_numpy(np.array([[1.0, 0.0, 0.0]])).float()\n",
    "\n",
    "# Element-wise binary cross-entropy; the 1e-5 keeps log() finite at 0 and 1.\n",
    "per_cross_ent_high = - labels_high * torch.log(output + 1e-5) - (1 - labels_high) * torch.log(1 - output + 1e-5)\n",
    "per_cross_ent_low = - labels_low * torch.log(output + 1e-5) - (1 - labels_low) * torch.log(1 - output + 1e-5)\n",
    "\n",
    "# Blend: 20% of the low-label loss, 80% of the high-label loss.\n",
    "loss = 0.2 * per_cross_ent_low + 0.8 * per_cross_ent_high\n",
    "\n",
    "# (The original cell printed each loss tensor twice; once is enough.)\n",
    "print('high_loss', per_cross_ent_high)\n",
    "print('low_loss', per_cross_ent_low)\n",
    "print('high_loss_sum', per_cross_ent_high.sum())\n",
    "print('low_loss_sum', per_cross_ent_low.sum())\n",
    "print('\\nloss: ', loss.sum())\n",
    "\n",
    "# Observed total loss for various outputs:\n",
    "# [0.0,  1.0,  0.0] -> 2.3026\n",
    "# [0.1,  0.9,  0.0] -> 0.6501\n",
    "# [0.2,  0.8,  0.0] -> 0.7235\n",
    "# [0.01, 0.99, 0.0] -> 0.9389"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "16.0\n",
      "15.0\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "# Round a fractional label up and down to its neighbouring integers.\n",
    "angle_label = 15.2\n",
    "rounded_up = np.ceil(angle_label)\n",
    "rounded_down = np.floor(angle_label)\n",
    "print(rounded_up)\n",
    "print(rounded_down)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1.  0.9 1. ]\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "\n",
    "# Element-wise maximum of two arrays.\n",
    "first = np.array([1, 0.9, 1])\n",
    "second = np.array([0.2, 0.9, 1])\n",
    "print(np.maximum(first, second))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
