{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "TimeGAN.ipynb",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "8NakQP80GeSS"
      },
      "source": [
        "import numpy as np"
      ],
      "execution_count": 1,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hrfyA_LUtlKW"
      },
      "source": [
        "# data loading and generation\n",
        "def Normalize(data):\n",
        "  \"\"\"Min-max normalize `data` along axis 0 into [0, 1).\"\"\"\n",
        "  col_min = np.min(data, 0)\n",
        "  col_range = np.max(data, 0) - col_min\n",
        "  # small epsilon keeps constant columns from dividing by zero\n",
        "  return (data - col_min) / (col_range + 1e-7)"
      ],
      "execution_count": 2,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "9Ug0xl4eue2W",
        "outputId": "956578e8-f1d2-4619-9338-65d07fe304dc"
      },
      "source": [
        "dta = np.arange(10)\n",
        "dta = np.concatenate((dta, dta))\n",
        "# dta = np.stack((dta, dta))\n",
        "# dta\n",
        "Normalize(dta)"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "array([0.        , 0.11111111, 0.22222222, 0.33333333, 0.44444444,\n",
              "       0.55555555, 0.66666666, 0.77777777, 0.88888888, 0.99999999,\n",
              "       0.        , 0.11111111, 0.22222222, 0.33333333, 0.44444444,\n",
              "       0.55555555, 0.66666666, 0.77777777, 0.88888888, 0.99999999])"
            ]
          },
          "metadata": {},
          "execution_count": 3
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WZUEjCwCumYt"
      },
      "source": [
        "def Normalize2(dta):\n",
        "  \"\"\"One-line min-max normalization along axis 0 (same contract as Normalize).\"\"\"\n",
        "  lo, hi = np.min(dta, 0), np.max(dta, 0)\n",
        "  return (dta - lo) / (hi - lo + 1e-7)"
      ],
      "execution_count": 4,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xqzp-Y5UDLFz"
      },
      "source": [
        "def Normalize3(dta):\n",
        "  \"\"\"Min-max normalize over the first two axes.\n",
        "\n",
        "  Args:\n",
        "    - dta: array-like, e.g. shape (no, seq_len, dim)\n",
        "\n",
        "  Returns:\n",
        "    - norm_dta: normalized data\n",
        "    - min_val: minimum subtracted from the data\n",
        "    - max_val: maximum of the shifted data (used as divisor, with epsilon)\n",
        "  \"\"\"\n",
        "  # BUG FIX: the original referenced the undefined name `data` here (NameError);\n",
        "  # also renamed locals so the min/max builtins are not shadowed\n",
        "  min_val = np.min(np.min(dta, axis = 0), axis = 0)\n",
        "  dta = dta - min_val\n",
        "\n",
        "  max_val = np.max(np.max(dta, axis = 0), axis = 0)\n",
        "  norm_dta = dta / (max_val + 1e-7)\n",
        "  return norm_dta, min_val, max_val"
      ],
      "execution_count": 5,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "lXLrOZfNvXk8",
        "outputId": "023f6b00-7d75-4c0d-8fa5-fcfc4ccdb074"
      },
      "source": [
        "Normalize2(dta)"
      ],
      "execution_count": 6,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "array([0.        , 0.11111111, 0.22222222, 0.33333333, 0.44444444,\n",
              "       0.55555555, 0.66666666, 0.77777777, 0.88888888, 0.99999999,\n",
              "       0.        , 0.11111111, 0.22222222, 0.33333333, 0.44444444,\n",
              "       0.55555555, 0.66666666, 0.77777777, 0.88888888, 0.99999999])"
            ]
          },
          "metadata": {},
          "execution_count": 6
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1rlXKNFTwNg2"
      },
      "source": [
        "def sine_data_generation(no, seq_len, dim):\n",
        "  \"\"\"Generate `no` sine time-series, each of shape (seq_len, dim), scaled to [0, 1].\"\"\"\n",
        "  samples = []\n",
        "  for _ in range(no):\n",
        "    channels = []\n",
        "    for _ in range(dim):\n",
        "      # random frequency and phase per feature (draw order matters under a fixed seed)\n",
        "      freq = np.random.uniform(0, 0.1)\n",
        "      phase = np.random.uniform(0, 0.1)\n",
        "      channels.append([np.sin(freq * t + phase) for t in range(seq_len)])\n",
        "    # (dim, seq_len) -> (seq_len, dim), then rescale from [-1, 1] to [0, 1]\n",
        "    samples.append((np.asarray(channels).T + 1) * 0.5)\n",
        "  return samples"
      ],
      "execution_count": 7,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "j3UfK1CHxceQ"
      },
      "source": [
        "dta = sine_data_generation(1, 100, 1)"
      ],
      "execution_count": 8,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "paixiBPvz8kM"
      },
      "source": [
        "def sine_data_generation_orig (no, seq_len, dim):\n",
        "  \"\"\"Sine data generation (reference implementation kept for comparison).\n",
        "  \n",
        "  Args:\n",
        "    - no: the number of samples\n",
        "    - seq_len: sequence length of the time-series\n",
        "    - dim: feature dimensions\n",
        "    \n",
        "  Returns:\n",
        "    - data: generated data\n",
        "  \"\"\"\n",
        "  data = list()\n",
        "\n",
        "  for i in range(no):\n",
        "    # One sine wave per feature dimension\n",
        "    temp = list()\n",
        "    for k in range(dim):\n",
        "      # Randomly drawn frequency and phase (same draw order as the rewrite above)\n",
        "      freq = np.random.uniform(0, 0.1)\n",
        "      phase = np.random.uniform(0, 0.1)\n",
        "      temp.append([np.sin(freq * j + phase) for j in range(seq_len)])\n",
        "\n",
        "    # Align row/column to (seq_len, dim) and normalize to [0,1]\n",
        "    data.append((np.transpose(np.asarray(temp)) + 1) * 0.5)\n",
        "\n",
        "  return data"
      ],
      "execution_count": 9,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "s2PUJMnzxkIz",
        "outputId": "1b89ce19-5e17-49f9-a812-c2cdf73eb6ca"
      },
      "source": [
        "import matplotlib.pyplot as plt\n",
        "dta[0].shape[0]\n",
        "x = np.arange(dta[0].shape[0])\n",
        "y = dta[0] \n",
        "x = np.transpose(x)\n",
        "y = np.transpose(y)\n",
        "y = y[0]\n",
        "x.shape"
      ],
      "execution_count": 10,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(100,)"
            ]
          },
          "metadata": {},
          "execution_count": 10
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 283
        },
        "id": "lTMxC4xGxkj-",
        "outputId": "4741c9b3-7f6d-4cfa-ac86-4b8e3b5ab0c7"
      },
      "source": [
        "plt.scatter(x,y)"
      ],
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "<matplotlib.collections.PathCollection at 0x7faa8db98350>"
            ]
          },
          "metadata": {},
          "execution_count": 11
        },
        {
          "output_type": "display_data",
          "data": {
            "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD4CAYAAADiry33AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAWVUlEQVR4nO3dcYxl5X3e8e+zAzFrV81CsSOzy8JaXWO7cQvpFXZKFTkkwCatACVVulCpIDnZ/mHsFEVUoEayu64card1qIQsbwgOjVpwSi26iaJsiQmyhGxnZ2vXmHWwl3VjdnHKxrBpVa8wu/vrH/eMuQwzzLkzd+bee+73I41mzjnvmXmPzu4zv3nPOe9JVSFJ6q5N4+6AJGl9GfSS1HEGvSR1nEEvSR1n0EtSx50z7g4sduGFF9all1467m5I0lQ5dOjQX1bVm5faNnFBf+mllzI/Pz/ubkjSVEny58ttc+hGkjrOoJekjjPoJanjDHpJ6jiDXpI6buLuupGkWfPIV47ziQNP89zJU1y0ZTN3XHcZN16xdWTf36CXpDF65CvHuetzT3Lq5TMAHD95irs+9yTAyMLeoJekMVio4o+fPPWabadePsMnDjxt0EvStFpcxS/luSV+AayWQS9JG+T1qvjFLtqyeWQ/16CXpHU0GO4B2rzTb/O5c9xx3WUj64NBL0nrZPEQTZuQ3zquu26S7ALuAeaA+6rq7kXbtwMPAFuaNndW1R822+4C3g+cAT5UVQdG1ntJmkDDDNEs2HzuHL/xC+8eacAvWDHok8wB9wLXAMeAg0n2V9XhgWa/DvxeVX0qybuAPwQubb7eDfwt4CLgj5O8vaqWvwIhSVOszYXWxdajih/UpqK/EjhSVUcBkjwE3AAMBn0Bf735+keB55qvbwAeqqqXgG8nOdJ8vy+OoO+SNDEmrYof1CbotwLPDiwfA96zqM1HgP+e5IPAm4CfHdj3S4v2fc0RJdkD7AHYvn17m35L0sQYpopfuCC73lX8oFFdjL0J+J2q+ndJfhL43SQ/3nbnqtoH7APo9XptrldI0tgNW8VvZLgPahP0x4GLB5a3NesGvR/YBVBVX0xyHnBhy30laWqs9nbJjRiiWU6b2SsPAjuT7EjyI/Qvru5f1OY7wM8AJHkncB5womm3O8kbkuwAdgJ/OqrOS9JGWhiiWajg294uOc6QhxYVfVWdTnIbcID+rZP3V9VTSfYC81W1H/g14LeS3E7/2G+tqgKeSvJ79C/cngY+4B03kqbJ4MySmxLOVLvR5XFX8YNSLTu9UXq9XvlycEmTYDW3SsJ4xuKTHKqq3lLbfDJWkhZZza2SMFlV/CCDXpIGDFvFj+N2yWEZ9JLEcFX8XMLZqnV5G9R6MOglzaxpvFVyNQx6STNpUmaW3AgGvaSZMslz0qwXg17SzJjEmSU3gkEvqfNmsYofZNBL6rRJn1lyIxj0kjppWmaW3AgGvaTOGaaK79IQzXIMekmdYRW/NINe0lSblYee1sKglzS1Zumhp7Uw6CVNnVm/XXJYBr2kqTKrDz2thUEvaSpYxa+eQS9p4vnQ09oY9JImlrdLjoZBL2ki+dDT6Bj0kiaKVfzoGfSSJoZV/Pow6CWN1UIF/9zJU2xKOFMrP/ZkFT+cVkGfZBdwDzAH3FdVdy/a/kngp5vFNwJvqaotzbYzwJPNtu9U1fWj6Lik6bXctAUrhbxV/OqsGPRJ5oB7gWuAY8DBJPur6vBCm6q6faD9B4ErBr7Fqaq6fHRdljTNVjNtAVjFr0Wbiv5K4EhVHQVI8hBwA3B4mfY3AR8eTfckdcVqHngCq/hR2NSizVbg2YHlY82610hyCbADeGxg9XlJ5pN8KcmNy+y3p2kzf+LEiZZdlzQtFqr4tiE/lxD6Vbwhv3ajvhi7G3i4qgYvmV9SVceTvA14LMmTVfXM4E5VtQ/YB9Dr9dr+JSdpwj
ltwWRoU9EfBy4eWN7WrFvKbuDBwRVVdbz5fBR4nFeP30vqqGGq+DSfreDXR5uK/iCwM8kO+gG/G7h5caMk7wDOB744sO584PtV9VKSC4GrgI+PouOSJpMPPE2eFYO+qk4nuQ04QP/2yvur6qkke4H5qtrfNN0NPFT1qvuj3gl8OslZ+n893D14t46kbvGBp8mUavFwwkbq9Xo1Pz8/7m5IGoJV/PglOVRVvaW2+WSspDWxip98Br2kVbGKnx4GvaShWcVPF4NeUmtW8dPJoJfUilX89DLoJb0uq/jpZ9BLWpZVfDcY9JJewyq+Wwx6Sa9iFd89Br0kwCq+ywx6aYYt90q/12MVP30MemlGreaVflbx08mgl2aMLwOZPQa9NEOGudC6wCp++hn00gywip9tBr3UccNU8QsXZK3iu8WglzpooYJ/7uQpNiWcafGCIcO9uwx6qWMWV/ArhbxDNN1n0EsdsZpxeKv42WDQSx0w7N00VvGzxaCXptgwVfxcwtkqLrKKnzkGvTSlnHxMbRn00pRx8jENq1XQJ9kF3APMAfdV1d2Ltn8S+Olm8Y3AW6pqS7PtFuDXm23/uqoeGEXHpVlkFa/VWDHok8wB9wLXAMeAg0n2V9XhhTZVdftA+w8CVzRfXwB8GOjRfw7jULPviyM9CqnjrOK1Fm0q+iuBI1V1FCDJQ8ANwOFl2t9EP9wBrgMeraoXmn0fBXYBD66l09IssYrXWrUJ+q3AswPLx4D3LNUwySXADuCx19n3Nf8Ck+wB9gBs3769RZek7rOK16iM+mLsbuDhqmo/NR5QVfuAfQC9Xq/NtNhSp1nFa5TaBP1x4OKB5W3NuqXsBj6waN/3Ldr38fbdk2aLVbzWQ5ugPwjsTLKDfnDvBm5e3CjJO4DzgS8OrD4AfCzJ+c3ytcBda+qx1FFW8VovKwZ9VZ1Ochv90J4D7q+qp5LsBearan/TdDfwUNUrMyhV1QtJPkr/lwXA3oULs5L6rOK13lItpi/dSL1er+bn58fdDWlDWMVrVJIcqqreUtt8MlYaA6t4bSSDXtpgVvHaaAa9tEGs4jUuBr20AaziNU4GvbSOrOI1CQx6aZ1YxWtSGPTSiFnFa9IY9NIIWcVrEhn00ghYxWuSGfTSGlnFa9IZ9NIqLFTwz508xaaEMy2mErGK17gY9NKQFlfwK4W8VbzGzaCXWhp2HB6s4jUZDHqphWHG4cEqXpPFoJdexzBV/FzC2SousorXhDHopWV4N426wqCXFvGeeHWNQS8NsIpXFxn0Elbx6jaDXjPPKl5dZ9BrZlnFa1YY9JpJVvGaJQa9ZopVvGZRq6BPsgu4B5gD7ququ5do80vAR4AC/mdV3dysPwM82TT7TlVdP4J+S0OzitesWjHok8wB9wLXAMeAg0n2V9XhgTY7gbuAq6rqxSRvGfgWp6rq8hH3WxraJw483SrkreLVNW0q+iuBI1V1FCDJQ8ANwOGBNr8C3FtVLwJU1fOj7qi0Wm2Ha6zi1VWbWrTZCjw7sHysWTfo7cDbkzyR5EvNUM+C85LMN+tvXOoHJNnTtJk/ceLEUAcgvZ6F4ZqVQn7rls2GvDprVBdjzwF2Au8DtgFfSPLuqjoJXFJVx5O8DXgsyZNV9czgzlW1D9gH0Ov1Vn6Dg7QCq3jpFW0q+uPAxQPL25p1g44B+6vq5ar6NvBN+sFPVR1vPh8FHgeuWGOfpddlFS+9WpuK/iCwM8kO+gG/G7h5UZtHgJuAzyS5kP5QztEk5wPfr6qXmvVXAR8fWe+lAcPcOrl1y2aeuPPqDeiVNH4rBn1VnU5yG3CA/u2V91fVU0n2AvNVtb/Zdm2Sw8AZ4I6q+l6Svwd8OslZ+n893D14t440KsPeOnnHdZdtQK+kyZBq8VLjjdTr9Wp+fn7c3dCU8AEoqS/JoarqLbXNJ2M1tXwASmrHoNfUsYqXhmPQa6pYxUvDM+g1FazipdUz6DXxrOKltTHoNZEWKvjnTp5iU8
KZFneHWcVLSzPoNXEWV/ArhbxVvPT6DHpNjGHH4cEqXmrDoNdEGGYcHqzipWEY9BqrYar4uYSzVVxkFS8NxaDX2Hg3jbQxDHptOO+JlzaWQa8NZRUvbTyDXhvCKl4aH4Ne684qXhovg17rxipemgwGvdaFVbw0OQx6jZRVvDR5DHqNjFW8NJkMeo3MJw483SrkreKljWXQa83aDtdYxUvjYdBrTdoO11jFS+Nj0GtVrOKl6bGpTaMku5I8neRIkjuXafNLSQ4neSrJfx5Yf0uSbzUft4yq4xqfhSp+pZDfumWzIS9NgBUr+iRzwL3ANcAx4GCS/VV1eKDNTuAu4KqqejHJW5r1FwAfBnpAAYeafV8c/aFovQ1z6+TWLZt54s6rN6BXklbSpqK/EjhSVUer6gfAQ8ANi9r8CnDvQoBX1fPN+uuAR6vqhWbbo8Cu0XRdG6ltFQ/94Zo7rrtsA3olqY02Y/RbgWcHlo8B71nU5u0ASZ4A5oCPVNUfLbPva/6OT7IH2AOwffv2tn3XBvABKGn6jepi7DnATuB9wDbgC0ne3XbnqtoH7APo9Xqv/yZobRgfgJK6oU3QHwcuHlje1qwbdAz4clW9DHw7yTfpB/9x+uE/uO/jq+2sNoZVvNQtbYL+ILAzyQ76wb0buHlRm0eAm4DPJLmQ/lDOUeAZ4GNJzm/aXUv/oq0mlFW81D0rBn1VnU5yG3CA/vj7/VX1VJK9wHxV7W+2XZvkMHAGuKOqvgeQ5KP0f1kA7K2qF9bjQLQ2VvFSd6VqsobEe71ezc/Pj7sbM8UqXpp+SQ5VVW+pbT4ZO8Os4qXZYNDPKKt4aXYY9DNkoYJ/7uQpNiWcaTFsZxUvTT+DfkYsruBXCnmreKk7DPoZ0falIGAVL3WNQd9xw1xwtYqXusmg77A2F1znEs5WcZFVvNRZBn0H+VIQSYMM+o7x1X6SFjPoO8KXgkhajkHfAcM+/ORLQaTZYtBPMacwkNSGQT+lnMJAUlsG/ZSxipc0LIN+iljFS1oNg34KWMVLWguDfsJZxUtaK4N+QlnFSxoVg34CWcVLGiWDfgK1nVLYKl5SGwb9BHEyMknrwaCfEE5GJmm9GPRjZhUvab1tatMoya4kTyc5kuTOJbbfmuREkq82H788sO3MwPr9o+z8tFuo4lcK+a1bNhvyklZtxYo+yRxwL3ANcAw4mGR/VR1e1PSzVXXbEt/iVFVdvvaudodTCkvaSG0q+iuBI1V1tKp+ADwE3LC+3equtlU8OKWwpNFoM0a/FXh2YPkY8J4l2v1ikp8CvgncXlUL+5yXZB44DdxdVY8s3jHJHmAPwPbt24fo/vTwAShJ4zKqi7G/DzxYVS8l+WfAA8DCeMMlVXU8yduAx5I8WVXPDO5cVfuAfQC9Xq9G1KeJ4QNQksapTdAfBy4eWN7WrPuhqvrewOJ9wMcHth1vPh9N8jhwBfCqoO8qq3hJk6BN0B8EdibZQT/gdwM3DzZI8taq+m6zeD3wjWb9+cD3m0r/QuAqBn4JdJlVvKRJsWLQV9XpJLcBB4A54P6qeirJXmC+qvYDH0pyPf1x+BeAW5vd3wl8OslZ+hd+717ibp3OWKjgnzt5ik0JZ2rlUSireEnrLdUijDZSr9er+fn5cXdjaMNU8GAVL2m0khyqqt5S23wydo2GHYcHq3hJG8ugXwOreEnTwKBfgzbTCc8lnK3iIqt4SWNi0K+CE5FJmiYG/ZCcTljStDHoW7KKlzStDPoWrOIlTTOD/nU4nbCkLjDolzHsFAZOJyxpUhn0izgRmaSuMegHOBGZpC4y6LGKl9RtMx/0VvGSum5mg94qXtKsmMmgt4qXNEtmKuit4iXNopkJeqt4SbNqZoK+zZTCYBUvqXs6H/RORiZp1nU66J2MTJI6GvRW8ZL0is4FvVW8JL1aZ4LeKYUlaWmb2jRKsivJ00mOJLlzie23JjmR5KvNxy8PbLslybeaj1tG2fkFC1V8m5B3SmFJs2
bFij7JHHAvcA1wDDiYZH9VHV7U9LNVdduifS8APgz0gAIONfu+OJLeN7x1UpKW12bo5krgSFUdBUjyEHADsDjol3Id8GhVvdDs+yiwC3hwdd1d2nNedJWkZbUZutkKPDuwfKxZt9gvJvlakoeTXDzMvkn2JJlPMn/ixImWXX/FRVs2L9/5LZsNeUkzrdUYfQu/D1xaVX8beBR4YJidq2pfVfWqqvfmN7956B9+x3WXsfncuVet23zuHL/5jy/niTuvNuQlzbQ2QX8cuHhgeVuz7oeq6ntV9VKzeB/wd9vuOwo3XrGV3/iFd7N1y2aCVbwkDWozRn8Q2JlkB/2Q3g3cPNggyVur6rvN4vXAN5qvDwAfS3J+s3wtcNeae72EG6/YarBL0hJWDPqqOp3kNvqhPQfcX1VPJdkLzFfVfuBDSa4HTgMvALc2+76Q5KP0f1kA7F24MCtJ2hipqnH34VV6vV7Nz8+PuxuSNFWSHKqq3lLbRnUxVpI0oQx6Seo4g16SOm7ixuiTnAD+fA3f4kLgL0fUnWkxi8cMs3ncs3jMMJvHPewxX1JVSz6INHFBv1ZJ5pe7INFVs3jMMJvHPYvHDLN53KM8ZoduJKnjDHpJ6rguBv2+cXdgDGbxmGE2j3sWjxlm87hHdsydG6OXJL1aFyt6SdIAg16SOq4zQb/Se227IsnFSf4kyeEkTyX51Wb9BUkebd7N++jAjKGdkWQuyVeS/EGzvCPJl5tz/tkkPzLuPo5aki3Ny3z+LMk3kvxk1891ktubf9tfT/JgkvO6eK6T3J/k+SRfH1i35LlN339ojv9rSX5imJ/ViaAfeK/tzwHvAm5K8q7x9mrdnAZ+rareBbwX+EBzrHcCn6+qncDnm+Wu+VVemQIb4N8An6yqvwm8CLx/LL1aX/cAf1RV7wD+Dv3j7+y5TrIV+BDQq6ofpz9j7m66ea5/h/6rVQctd25/DtjZfOwBPjXMD+pE0DPwXtuq+gGw8F7bzqmq71bV/2i+/r/0/+NvpX+8C2/2egC4cTw9XB9JtgH/gP6LbUgS4Grg4aZJF4/5R4GfAn4boKp+UFUn6fi5pj99+uYk5wBvBL5LB891VX2B/rTug5Y7tzcA/7H6vgRsSfLWtj+rK0Hf9r22nZLkUuAK4MvAjw28/OUvgB8bU7fWy28C/wI42yz/DeBkVZ1ulrt4zncAJ4DPNENW9yV5Ex0+11V1HPi3wHfoB/xfAYfo/rlesNy5XVPGdSXoZ06Svwb8V+CfV9X/GdxW/XtmO3PfbJJ/CDxfVYfG3ZcNdg7wE8CnquoK4P+xaJimg+f6fPrV6w7gIuBNvHZ4YyaM8tx2Jeg35N20kyLJufRD/j9V1eea1f974U+55vPz4+rfOrgKuD7J/6I/LHc1/bHrLc2f99DNc34MOFZVX26WH6Yf/F0+1z8LfLuqTlTVy8Dn6J//rp/rBcud2zVlXFeC/ofvtW2uxu8G9o+5T+uiGZv+beAbVfXvBzbtB25pvr4F+G8b3bf1UlV3VdW2qrqU/rl9rKr+CfAnwD9qmnXqmAGq6i+AZ5Nc1qz6GeAwHT7X9Ids3pvkjc2/9YVj7vS5HrDcud0P/NPm7pv3An81MMSzsqrqxAfw88A3gWeAfznu/qzjcf59+n/OfQ34avPx8/THrD8PfAv4Y+CCcfd1nY7/fcAfNF+/DfhT4AjwX4A3jLt/63C8lwPzzfl+BDi/6+ca+FfAnwFfB34XeEMXzzXwIP3rEC/T/+vt/cudWyD07yx8BniS/l1JrX+WUyBIUsd1ZehGkrQMg16SOs6gl6SOM+glqeMMeknqOINekjrOoJekjvv/Vv8ybbyD+CMAAAAASUVORK5CYII=\n",
            "text/plain": [
              "<Figure size 432x288 with 1 Axes>"
            ]
          },
          "metadata": {
            "needs_background": "light"
          }
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "menPCTNXypDg"
      },
      "source": [
        "dta = sine_data_generation(2, 10, 3)"
      ],
      "execution_count": 12,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "Db57CUJVzleT",
        "outputId": "fcedcde7-a2ae-43e2-e360-e9066195fce7"
      },
      "source": [
        "dta"
      ],
      "execution_count": 13,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[array([[0.50170776, 0.51355643, 0.53967664],\n",
              "        [0.52728952, 0.51426388, 0.5800946 ],\n",
              "        [0.55279978, 0.51497131, 0.61998155],\n",
              "        [0.57817171, 0.51567871, 0.65907307],\n",
              "        [0.60333883, 0.51638608, 0.69710997],\n",
              "        [0.62823522, 0.51709342, 0.73384009],\n",
              "        [0.65279565, 0.51780072, 0.76901993],\n",
              "        [0.67695576, 0.51850799, 0.80241623],\n",
              "        [0.70065227, 0.51921522, 0.83380761],\n",
              "        [0.72382308, 0.51992241, 0.86298594]]),\n",
              " array([[0.53740735, 0.5356633 , 0.54567212],\n",
              "        [0.57061955, 0.57842932, 0.56664778],\n",
              "        [0.60351648, 0.62061399, 0.58750465],\n",
              "        [0.63595128, 0.66190462, 0.60820555],\n",
              "        [0.66777914, 0.70199515, 0.62871358],\n",
              "        [0.69885798, 0.74058841, 0.6489922 ],\n",
              "        [0.72904904, 0.77739834, 0.66900524],\n",
              "        [0.75821755, 0.81215207, 0.68871705],\n",
              "        [0.78623328, 0.84459202, 0.70809249],\n",
              "        [0.81297116, 0.87447771, 0.72709703]])]"
            ]
          },
          "metadata": {},
          "execution_count": 13
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "4yMiCu5Bzl3r",
        "outputId": "a1aa9ef0-64f1-44a4-a3f0-a68c3c3de99a"
      },
      "source": [
        "np.random.seed(1)\n",
        "dta1 = sine_data_generation(1, 10, 1)\n",
        "np.random.seed(1)\n",
        "dta2 = sine_data_generation_orig(1, 10, 1)\n",
        "dta1, dta2"
      ],
      "execution_count": 14,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "([array([[0.53598509],\n",
              "         [0.5567448 ],\n",
              "         [0.57740585],\n",
              "         [0.5979323 ],\n",
              "         [0.61828847],\n",
              "         [0.63843895],\n",
              "         [0.65834871],\n",
              "         [0.67798313],\n",
              "         [0.69730807],\n",
              "         [0.71628993]])], [array([[0.53598509],\n",
              "         [0.5567448 ],\n",
              "         [0.57740585],\n",
              "         [0.5979323 ],\n",
              "         [0.61828847],\n",
              "         [0.63843895],\n",
              "         [0.65834871],\n",
              "         [0.67798313],\n",
              "         [0.69730807],\n",
              "         [0.71628993]])])"
            ]
          },
          "metadata": {},
          "execution_count": 14
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "lu6OWqU30OhC",
        "outputId": "3c211012-40bf-4888-860f-456365fa983b"
      },
      "source": [
        "sum(dta1[0] == dta2[0])"
      ],
      "execution_count": 15,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "array([10])"
            ]
          },
          "metadata": {},
          "execution_count": 15
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-loXeTXN0prj"
      },
      "source": [
        "def real_data_loading (data_name, seq_len):\n",
        "  \"\"\"Load and preprocess real-world datasets.\n",
        "  \n",
        "  Args:\n",
        "    - data_name: stock or energy\n",
        "    - seq_len: sequence length\n",
        "    \n",
        "  Returns:\n",
        "    - data: preprocessed data.\n",
        "  \"\"\"\n",
        "  assert data_name in ['stock','energy']\n",
        "  \n",
        "  if data_name == 'stock':\n",
        "    ori_data = np.loadtxt('data/stock_data.csv', delimiter = \",\",skiprows = 1)\n",
        "  elif data_name == 'energy':\n",
        "    ori_data = np.loadtxt('data/energy_data.csv', delimiter = \",\",skiprows = 1)\n",
        "        \n",
        "  # Flip the data to make chronological data\n",
        "  ori_data = ori_data[::-1]\n",
        "  # BUG FIX: MinMaxScaler is never defined in this notebook (NameError);\n",
        "  # Normalize (defined above) performs the same column-wise min-max scaling\n",
        "  ori_data = Normalize(ori_data)\n",
        "    \n",
        "  # Cut data into overlapping windows of length seq_len\n",
        "  temp_data = []\n",
        "  for i in range(0, len(ori_data) - seq_len):\n",
        "    temp_data.append(ori_data[i:i + seq_len])\n",
        "        \n",
        "  # Mix the windows (to make the samples closer to i.i.d.)\n",
        "  idx = np.random.permutation(len(temp_data))\n",
        "  data = [temp_data[i] for i in idx]\n",
        "    \n",
        "  return data"
      ],
      "execution_count": 16,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1wc93GGm2cDo"
      },
      "source": [
        "def train_test_divide (data_x, data_x_hat, data_t, data_t_hat, train_rate = 0.8):\n",
        "  \"\"\"Divide train and test data for both original and synthetic data.\n",
        "  \n",
        "  Args:\n",
        "    - data_x: original data\n",
        "    - data_x_hat: generated data\n",
        "    - data_t: original time\n",
        "    - data_t_hat: generated time\n",
        "    - train_rate: ratio of training data from the original data\n",
        "  \"\"\"\n",
        "  def _split_idx(n):\n",
        "    # One random permutation, split into train/test index arrays\n",
        "    idx = np.random.permutation(n)\n",
        "    cut = int(n * train_rate)\n",
        "    return idx[:cut], idx[cut:]\n",
        "\n",
        "  # Original data (first permutation draw)\n",
        "  train_idx, test_idx = _split_idx(len(data_x))\n",
        "  train_x = [data_x[i] for i in train_idx]\n",
        "  test_x = [data_x[i] for i in test_idx]\n",
        "  train_t = [data_t[i] for i in train_idx]\n",
        "  test_t = [data_t[i] for i in test_idx]\n",
        "\n",
        "  # Synthetic data (second permutation draw)\n",
        "  train_idx, test_idx = _split_idx(len(data_x_hat))\n",
        "  train_x_hat = [data_x_hat[i] for i in train_idx]\n",
        "  test_x_hat = [data_x_hat[i] for i in test_idx]\n",
        "  train_t_hat = [data_t_hat[i] for i in train_idx]\n",
        "  test_t_hat = [data_t_hat[i] for i in test_idx]\n",
        "\n",
        "  return train_x, train_x_hat, test_x, test_x_hat, train_t, train_t_hat, test_t, test_t_hat\n",
        "\n",
        "\n",
        "def extract_time (data):\n",
        "  \"\"\"Returns Maximum sequence length and each sequence length.\n",
        "  \n",
        "  Args:\n",
        "    - data: original data\n",
        "    \n",
        "  Returns:\n",
        "    - time: extracted time information\n",
        "    - max_seq_len: maximum sequence length\n",
        "  \"\"\"\n",
        "  time = [len(seq[:, 0]) for seq in data]\n",
        "  max_seq_len = max(time) if time else 0\n",
        "  return time, max_seq_len"
      ],
      "execution_count": 17,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "N0urfopc3JyC"
      },
      "source": [
        "import torch\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "\n",
        "# helper to decide which architecture to use\n",
        "def rnn_cell(module_name, hidden_dim, num_inputs):\n",
        "  \"\"\"Return a single recurrent cell for the requested architecture.\n",
        "\n",
        "  Args:\n",
        "    - module_name: one of 'gru', 'lstm', 'lstmLN', 'AdaFNN'\n",
        "    - hidden_dim: hidden state dimension\n",
        "    - num_inputs: input feature dimension\n",
        "  \"\"\"\n",
        "  assert module_name in [\"gru\", \"lstm\", \"lstmLN\", \"AdaFNN\"]\n",
        "  # no activation argument on torch cells; these layers may need a custom\n",
        "  # reimplementation to match the tf version (bias defaults also unverified)\n",
        "  if module_name == \"gru\":\n",
        "    cell = nn.GRUCell(input_size = num_inputs, hidden_size=hidden_dim)\n",
        "  elif module_name == \"lstm\":\n",
        "    cell = nn.LSTMCell(input_size=num_inputs, hidden_size=hidden_dim)\n",
        "  else:\n",
        "    # BUG FIX: 'lstmLN' / 'AdaFNN' previously fell through with `pass` and\n",
        "    # raised UnboundLocalError at the return; fail loudly until implemented.\n",
        "    # Also renamed the local so it no longer shadows the function name.\n",
        "    raise NotImplementedError(module_name + \" cell is not implemented yet\")\n",
        "  return cell\n",
        "\n",
        "# https://github.com/daehwannam/pytorch-rnn-util/blob/ba4d5ada3581fd6711f792d3fe79e58755613ba9/rnn_util/seq.py"
      ],
      "execution_count": 18,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xEQiaSd17ydb"
      },
      "source": [
        "def random_generator (batch_size, z_dim, T_mb, max_seq_len):\n",
        "  \"\"\"Random vector generation.\n",
        "  \n",
        "  Args:\n",
        "    - batch_size: size of the random vector\n",
        "    - z_dim: dimension of random vector\n",
        "    - T_mb: time information for the random vector\n",
        "    - max_seq_len: maximum sequence length\n",
        "    \n",
        "  Returns:\n",
        "    - Z_mb: generated random vector, each entry of shape (max_seq_len, z_dim)\n",
        "  \"\"\"\n",
        "  Z_mb = list()\n",
        "  for i in range(batch_size):\n",
        "    # noise for the first T_mb[i] steps, zero padding up to max_seq_len\n",
        "    temp = np.zeros([max_seq_len, z_dim])\n",
        "    temp_Z = np.random.uniform(0., 1, [T_mb[i], z_dim])\n",
        "    temp[:T_mb[i],:] = temp_Z\n",
        "    # BUG FIX: append the padded array, not the raw noise; otherwise\n",
        "    # max_seq_len is ignored and the zero-padding above is dead code\n",
        "    Z_mb.append(temp)\n",
        "  return Z_mb"
      ],
      "execution_count": 19,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "5eCF1JuW6jsK",
        "outputId": "b3635cc8-2df8-4e5a-ae69-61b56fd512a1"
      },
      "source": [
        "torch.manual_seed(1)\n",
        "rnn = nn.GRUCell(5, 10)\n",
        "#rnn = nn.GRUCell(10, 20)\n",
        "input = torch.randn(1, 5)\n",
        "hx = torch.randn(1, 10)\n",
        "res1 = rnn(input, hx)\n",
        "res1"
      ],
      "execution_count": 20,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "tensor([[-0.2808,  0.4106, -0.7252, -0.6151, -0.5328,  0.8263, -0.6560, -0.5309,\n",
              "          0.5347,  0.0276]], grad_fn=<AddBackward0>)"
            ]
          },
          "metadata": {},
          "execution_count": 20
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WpqWkkXL6poB"
      },
      "source": [
        "def batch_generator(data, time, batch_size):\n",
        "  \"\"\"Mini-batch generator.\n",
        "  \n",
        "  Args:\n",
        "    - data: time-series data\n",
        "    - time: time information\n",
        "    - batch_size: the number of samples in each batch\n",
        "    \n",
        "  Returns:\n",
        "    - X_mb: time-series data in each batch\n",
        "    - T_mb: time information in each batch\n",
        "  \"\"\"\n",
        "  # sample batch_size indices without replacement\n",
        "  sample_idx = np.random.permutation(len(data))[:batch_size]\n",
        "\n",
        "  X_mb = [data[i] for i in sample_idx]\n",
        "  T_mb = [time[i] for i in sample_idx]\n",
        "\n",
        "  return X_mb, T_mb"
      ],
      "execution_count": 21,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "D0ayFYCaCY4D"
      },
      "source": [
        "def timegan(ori_data, parameters):\n",
        "  # WIP skeleton: unpacks parameters and preprocesses data; no network yet,\n",
        "  # so this implicitly returns None (superseded by the definition below).\n",
        "  # Basic Parameters\n",
        "  no, seq_len, dim = np.asarray(ori_data).shape\n",
        "  # max seq length and each sequence length\n",
        "  ori_time, max_seq_len = extract_time(ori_data)\n",
        "  # Normalization (locals renamed so the min/max builtins are not shadowed)\n",
        "  ori_data, min_val, max_val = Normalize3(ori_data)\n",
        "  # Network Params\n",
        "  hidden_dim   = parameters['hidden_dim']\n",
        "  num_layers   = parameters['num_layer']\n",
        "  iterations   = parameters['iterations']\n",
        "  batch_size   = parameters['batch_size']\n",
        "  module_name  = parameters['module']\n",
        "  z_dim        = dim\n",
        "  gamma        = 1\n",
        "  # need to implement cells from scratch and bundle them up into the multi rnn cell\n",
        "  # https://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html\n",
        "  # actually we only have to use torch.nn.RNN, has a num_layers argument, also has a tanh argument\n",
        "  # so we don't even need to do it with cells i think!\n",
        "  # dynamic versions \n",
        "  # https://github.com/songyouwei/ABSA-PyTorch/blob/master/layers/dynamic_rnn.py\n"
      ],
      "execution_count": 22,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_LhVapECF2V8"
      },
      "source": [
        "def timegan(ori_data, parameters):\n",
        "  no, seq_len, dim = np.asarray(ori_data).shape\n",
        "  ori_time, max_seq_len = extract_time(ori_data)\n",
        "  ori_data, min, max = Normalize3(ori_data)\n",
        "  # Network Params\n",
        "  hidden_dim   = parameters['hidden_dim'] \n",
        "  num_layers   = parameters['num_layer']\n",
        "  iterations   = parameters['iterations']\n",
        "  batch_size   = parameters['batch_size']\n",
        "  module_name  = parameters['module'] \n",
        "  z_dim        = dim\n",
        "  gamma        = 1\n",
        "  input_dim    = 10 # todo set input dim dependent on original data\n",
        "  # Inputs are X, Z and T\n",
        "  def embedder(X, T):\n",
        "    # X input time series features, T input time information \n",
        "    # bias is initialized to ones by default in tensorflow 1.x\n",
        "    # todo: wrap the if elif stuff in a function\n",
        "    if (module_name == \"rnn\"): # \"todo:dependent on ori_data\"\n",
        "      e_cell = torch.nn.RNN(input_size = input_dim, hidden_size = hidden_dim, \n",
        "                            num_layers = num_layers, nonlinearity=\"tanh\", bias = True, batch_first= True)\n",
        "    elif (module_name == \"lstm\"):\n",
        "      pass\n",
        "    elif (module_name == \"gru\"):\n",
        "      pass\n",
        "    elif (module_name == \"adafnn\"):\n",
        "      pass\n",
        "    # compute outputs\n",
        "    # dynamic rnn here https://github.com/songyouwei/ABSA-PyTorch/blob/master/layers/dynamic_rnn.py\n",
        "    # else we can just use the regular RNN function\n",
        "    # initialize h0 as learnable parameter? \n",
        "    h0 = torch.randn(num_layers, batch_size, hidden_size)\n",
        "    e_outputs, e_last_states = e_cell(X, h0)\n",
        "    # pass through fully connected layer \n",
        "    H = torch.sigmoid(nn.linear(e_outputs, hidden_dim))\n",
        "    return H\n",
        "    \n",
        "  def recovery (H, T):\n",
        "    \"\"\"Map the latent representation H back to the data space.\n",
        "    Returns X_tilde with the feature dimension of the original data.\"\"\"\n",
        "    if (module_name == \"rnn\"):\n",
        "      # fix: recovery consumes H (hidden_dim features), not the raw data -> input_size = hidden_dim\n",
        "      r_cell = torch.nn.RNN(input_size = hidden_dim, hidden_size = hidden_dim, \n",
        "                            num_layers = num_layers, nonlinearity=\"tanh\", bias = True, batch_first= True)\n",
        "    elif (module_name == \"lstm\"):\n",
        "      pass\n",
        "    elif (module_name == \"gru\"):\n",
        "      pass\n",
        "    elif (module_name == \"adafnn\"):\n",
        "      pass\n",
        "  \n",
        "    # get recovery; fix: was `hidden_size` (undefined name) -> hidden_dim\n",
        "    h0 = torch.randn(num_layers, batch_size, hidden_dim)\n",
        "    r_outputs, r_last_states = r_cell(H, h0)\n",
        "    # fix: nn.linear does not exist; apply an nn.Linear layer projecting back to dim\n",
        "    X_tilde = torch.sigmoid(nn.Linear(hidden_dim, dim)(r_outputs)) # dimension of original data\n",
        "    return X_tilde\n",
        "  \n",
        "  def generator(Z, T):\n",
        "    \"\"\"Generate latent codes E from the noise input Z.\"\"\"\n",
        "    # todo: define get_RNN function\n",
        "    # fix: the generator consumes Z, so the cell's input features are z_dim\n",
        "    e_cell = get_RNN(module_name, hidden_dim, z_dim, num_layers)\n",
        "    # fix: was `hidden_size` (undefined name) -> hidden_dim\n",
        "    h0 = torch.randn(num_layers, batch_size, hidden_dim)\n",
        "    e_outputs, e_last_states = e_cell(Z, h0)\n",
        "    # fix: nn.linear does not exist; build and apply an nn.Linear layer\n",
        "    E = torch.sigmoid(nn.Linear(hidden_dim, hidden_dim)(e_outputs))\n",
        "    return E\n",
        "  \n",
        "  def supervisor(H, T):\n",
        "    \"\"\"Predict the next latent vector from the current one (supervised step).\"\"\"\n",
        "    # todo: Wrap all this in another function so we can dynamically adjust everything\n",
        "    # with a single function call\n",
        "    # fix: the supervisor consumes H (hidden_dim features), not the raw data\n",
        "    e_cell = get_RNN(module_name, hidden_dim, hidden_dim, num_layers)\n",
        "    # fix: was `hidden_size` (undefined name) -> hidden_dim\n",
        "    h0 = torch.randn(num_layers, batch_size, hidden_dim)\n",
        "    e_outputs, e_last_states = e_cell(H, h0)\n",
        "    # fix: nn.linear does not exist; build and apply an nn.Linear layer\n",
        "    S = torch.sigmoid(nn.Linear(hidden_dim, hidden_dim)(e_outputs))\n",
        "    return S\n",
        "\n",
        "  def discriminator(H, T):\n",
        "    \"\"\"Score latent sequences; returns unnormalized logits Y_hat.\"\"\"\n",
        "    # todo: add dynamic sequence length \n",
        "    # fix: was `getRNN` (undefined; other networks call get_RNN) and the\n",
        "    # discriminator consumes H, so its input features are hidden_dim\n",
        "    d_cell = get_RNN(module_name, hidden_dim, hidden_dim, num_layers)\n",
        "    # fix: was `hidden_size` (undefined name) -> hidden_dim\n",
        "    h0 = torch.randn(num_layers, batch_size, hidden_dim)\n",
        "    d_outputs, d_last_states = d_cell(H, h0)\n",
        "    # no sigmoid here on purpose: the losses below expect raw logits\n",
        "    # fix: nn.linear does not exist; build and apply an nn.Linear layer\n",
        "    Y_hat = nn.Linear(hidden_dim, 1)(d_outputs)\n",
        "    return Y_hat\n",
        "\n",
        "  # Embedder & Recovery\n",
        "  # NOTE(review): X, Z, T are assumed to be mini-batch tensors defined earlier -- confirm\n",
        "  H = embedder(X, T)\n",
        "  X_tilde = recovery(H, T)\n",
        "    \n",
        "  # Generator\n",
        "  E_hat = generator(Z, T)\n",
        "  H_hat = supervisor(E_hat, T)\n",
        "  H_hat_supervised = supervisor(H, T)\n",
        "    \n",
        "  # Synthetic data\n",
        "  X_hat = recovery(H_hat, T)\n",
        "    \n",
        "  # Discriminator\n",
        "  Y_fake = discriminator(H_hat, T)\n",
        "  Y_real = discriminator(H, T)     \n",
        "  Y_fake_e = discriminator(E_hat, T)\n",
        "\n",
        "  # fix: nn.BCEWithLogitsLoss / nn.MSELoss are module classes; calling the\n",
        "  # constructor with tensors configures the module (they were interpreted as\n",
        "  # weight/size_average) instead of computing a loss. Instantiate once, then\n",
        "  # call as fn(input, target).\n",
        "  bce_logits = nn.BCEWithLogitsLoss()\n",
        "  mse = nn.MSELoss()\n",
        "\n",
        "  # Discriminator loss (on raw logits; argument order is (input, target))\n",
        "  D_loss_real = bce_logits(Y_real, torch.ones_like(Y_real))\n",
        "  D_loss_fake = bce_logits(Y_fake, torch.zeros_like(Y_fake))\n",
        "  D_loss_fake_e = bce_logits(Y_fake_e, torch.zeros_like(Y_fake_e))\n",
        "  D_loss = D_loss_real + D_loss_fake + gamma * D_loss_fake_e\n",
        "\n",
        "  # Generator loss\n",
        "  # Adversarial loss\n",
        "  G_loss_U = bce_logits(Y_fake, torch.ones_like(Y_fake))\n",
        "  # fix: `torch_ones_like` was a typo for torch.ones_like\n",
        "  G_loss_U_e = bce_logits(Y_fake_e, torch.ones_like(Y_fake_e))\n",
        "\n",
        "  # Supervised loss: the supervisor's step-t output should predict H at t+1\n",
        "  # fix: H[:,1,:] dropped a colon and selected a single step; the slice is H[:,1:,:]\n",
        "  G_loss_S = mse(H_hat_supervised[:,:-1,:], H[:,1:,:])\n",
        "\n",
        "  # Two-moment matching between real and synthetic batches.\n",
        "  # tf reference: G_loss_V1 = tf.reduce_mean(tf.abs(tf.sqrt(tf.nn.moments(X_hat,[0])[1] + 1e-6) - tf.sqrt(tf.nn.moments(X,[0])[1] + 1e-6)))\n",
        "  # tf.nn.moments does not use Bessel's correction, so unbiased=False on BOTH\n",
        "  # terms (before, only the X_hat term was uncorrected), and the 1e-6 epsilon\n",
        "  # belongs under the sqrt (before, the two epsilons simply cancelled)\n",
        "  G_loss_V1 = torch.mean(torch.abs(torch.sqrt(torch.var(X_hat, dim=0, unbiased=False) + 1e-6) - torch.sqrt(torch.var(X, dim=0, unbiased=False) + 1e-6)))\n",
        "  # G_loss_V2 = tf.reduce_mean(tf.abs((tf.nn.moments(X_hat,[0])[0]) - (tf.nn.moments(X,[0])[0])))\n",
        "  G_loss_V2 = torch.mean(torch.abs((torch.mean(X_hat, [0]) - (torch.mean(X, [0])))))\n",
        "  G_loss_V = G_loss_V1 + G_loss_V2\n",
        "  # sum everything up; fix: tf.sqrt -> torch.sqrt (this is the torch port)\n",
        "  G_loss = G_loss_U + gamma * G_loss_U_e + 100 * torch.sqrt(G_loss_S) + 100*G_loss_V \n",
        "\n",
        "  # Embedder network loss (argument order is (input, target))\n",
        "  E_loss_T0 = mse(X_tilde, X)\n",
        "  E_loss0 = 10 * torch.sqrt(E_loss_T0)\n",
        "  E_loss = E_loss0  + 0.1 * G_loss_S\n",
        "\n",
        "  # training is done in 4 distinct training loops\n",
        "  "
      ],
      "execution_count": 23,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "8zLiLMmB7Vh_",
        "outputId": "195fc61d-df56-47c1-8c4c-12eeaf5c44ec"
      },
      "source": [
        "nn.BCELoss(torch.ones(1), torch.zeros(1))"
      ],
      "execution_count": 24,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.7/dist-packages/torch/nn/_reduction.py:42: UserWarning: size_average and reduce args will be deprecated, please use reduction='sum' instead.\n",
            "  warnings.warn(warning.format(ret))\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "BCELoss()"
            ]
          },
          "metadata": {},
          "execution_count": 24
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "AA6a5nCwZE7H",
        "outputId": "1ce1976d-e4e1-496c-9c1a-c8979d31674c"
      },
      "source": [
        "loss = nn.BCEWithLogitsLoss()\n",
        "input = torch.randn(3, requires_grad=True)\n",
        "target = torch.empty(3).random_(2)\n",
        "output = loss(input, target)\n",
        "output"
      ],
      "execution_count": 25,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "tensor(0.9558, grad_fn=<BinaryCrossEntropyWithLogitsBackward>)"
            ]
          },
          "metadata": {},
          "execution_count": 25
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KGDue0fHAA9s"
      },
      "source": [
        ""
      ],
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uSwXivP9Zbam"
      },
      "source": [
        ""
      ],
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nz1H6QtyaEsa"
      },
      "source": [
        ""
      ],
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "e-cybIWZaSFy"
      },
      "source": [
        ""
      ],
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dEHCN9nYaSio"
      },
      "source": [
        ""
      ],
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "B7r4TICfaisz"
      },
      "source": [
        ""
      ],
      "execution_count": 25,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "gDaUrz30ao6C",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "0aff827e-b482-4385-f2aa-bc6d4897c8f4"
      },
      "source": [
        "test = torch.nn.RNN(input_size = 5, hidden_size= 10, num_layers=2, nonlinearity=\"tanh\", bias = True, batch_first=True)\n",
        "input = torch.randn(1, 10, 5)\n",
        "h0 = torch.randn(2, 1, 10)\n",
        "output, hn = test(input, h0)\n",
        "output"
      ],
      "execution_count": 26,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "tensor([[[ 0.6440,  0.4090,  0.6669, -0.7245,  0.9293, -0.8816,  0.2027,\n",
              "           0.0604, -0.7681,  0.1004],\n",
              "         [ 0.8545,  0.6267, -0.2362,  0.1335,  0.6135, -0.3519, -0.4598,\n",
              "           0.1437, -0.6159,  0.3881],\n",
              "         [ 0.5354,  0.3174,  0.3125, -0.0881,  0.3082, -0.4653, -0.2973,\n",
              "           0.0196, -0.1805,  0.5621],\n",
              "         [ 0.1535, -0.1906,  0.2505, -0.4002,  0.4702, -0.4689, -0.4161,\n",
              "           0.0950, -0.5879,  0.5591],\n",
              "         [ 0.2362,  0.0858,  0.0666, -0.2530,  0.5588, -0.2973, -0.1830,\n",
              "           0.1253, -0.2696,  0.2916],\n",
              "         [ 0.1430, -0.1385,  0.0664, -0.2075,  0.5770, -0.1295, -0.1648,\n",
              "           0.2977, -0.1752,  0.2234],\n",
              "         [ 0.2326, -0.2248,  0.2384, -0.2234,  0.7741, -0.4528, -0.1645,\n",
              "           0.1504, -0.2843,  0.6134],\n",
              "         [ 0.1824, -0.0494, -0.2474, -0.2566,  0.4945,  0.0823, -0.0281,\n",
              "           0.3909, -0.0203,  0.0955],\n",
              "         [-0.2310, -0.4456, -0.0093, -0.3309,  0.5148, -0.1646, -0.0289,\n",
              "          -0.0672,  0.0557,  0.3293],\n",
              "         [-0.2688, -0.6436,  0.1896, -0.2266,  0.7682, -0.0805, -0.2712,\n",
              "           0.2361, -0.3711,  0.4124]]], grad_fn=<TransposeBackward1>)"
            ]
          },
          "metadata": {},
          "execution_count": 26
        }
      ]
    }
  ]
}
