{
  "nbformat": 4,
  "nbformat_minor": 5,
  "metadata": {
    "kernelspec": {
      "display_name": "torch",
      "language": "python",
      "name": "torch"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.7.11"
    },
    "varInspector": {
      "cols": {
        "lenName": 16,
        "lenType": 16,
        "lenVar": 40
      },
      "kernels_config": {
        "python": {
          "delete_cmd_postfix": "",
          "delete_cmd_prefix": "del ",
          "library": "var_list.py",
          "varRefreshCmd": "print(var_dic_list())"
        },
        "r": {
          "delete_cmd_postfix": ") ",
          "delete_cmd_prefix": "rm(",
          "library": "var_list.r",
          "varRefreshCmd": "cat(var_dic_list()) "
        }
      },
      "types_to_exclude": [
        "module",
        "function",
        "builtin_function_or_method",
        "instance",
        "_Feature"
      ],
      "window_display": false
    },
    "colab": {
      "name": "trainings_loops.ipynb",
      "provenance": [],
      "collapsed_sections": []
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "a1c59f8a"
      },
      "source": [
        "import numpy as np\n",
        "import torch\n",
        "import torch.nn as nn\n",
        "from torch import optim\n",
        "import torch.nn.functional as F\n",
        "\n",
        "# from data_loading import sine_data_generation\n",
        "# from utils import random_generator\n",
        "# from data_loading import MinMaxScaler\n",
        "\n",
        "from torch.utils.data import DataLoader\n",
        "\n",
        "\n",
        "# from utils import extract_time\n"
      ],
      "id": "a1c59f8a",
      "execution_count": 18,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ahJcCrzPQl-k"
      },
      "source": [
        "def train_test_divide (data_x, data_x_hat, data_t, data_t_hat, train_rate = 0.8):\n",
        "  \"\"\"Divide train and test data for both original and synthetic data.\n",
        "  \n",
        "  Args:\n",
        "    - data_x: original data\n",
        "    - data_x_hat: generated data\n",
        "    - data_t: original time\n",
        "    - data_t_hat: generated time\n",
        "    - train_rate: ratio of training data from the original data\n",
        "  \n",
        "  Returns:\n",
        "    - train/test splits of data_x, data_x_hat, data_t, data_t_hat as lists,\n",
        "      in the order (train_x, train_x_hat, test_x, test_x_hat,\n",
        "      train_t, train_t_hat, test_t, test_t_hat)\n",
        "  \"\"\"\n",
        "  # Divide train/test index (original data)\n",
        "  no = len(data_x)\n",
        "  idx = np.random.permutation(no)\n",
        "  train_idx = idx[:int(no*train_rate)]\n",
        "  test_idx = idx[int(no*train_rate):]\n",
        "    \n",
        "  train_x = [data_x[i] for i in train_idx]\n",
        "  test_x = [data_x[i] for i in test_idx]\n",
        "  train_t = [data_t[i] for i in train_idx]\n",
        "  test_t = [data_t[i] for i in test_idx]      \n",
        "    \n",
        "  # Divide train/test index (synthetic data) -- a fresh permutation, so the\n",
        "  # original and synthetic splits are shuffled independently of each other\n",
        "  no = len(data_x_hat)\n",
        "  idx = np.random.permutation(no)\n",
        "  train_idx = idx[:int(no*train_rate)]\n",
        "  test_idx = idx[int(no*train_rate):]\n",
        "  \n",
        "  train_x_hat = [data_x_hat[i] for i in train_idx]\n",
        "  test_x_hat = [data_x_hat[i] for i in test_idx]\n",
        "  train_t_hat = [data_t_hat[i] for i in train_idx]\n",
        "  test_t_hat = [data_t_hat[i] for i in test_idx]\n",
        "  \n",
        "  return train_x, train_x_hat, test_x, test_x_hat, train_t, train_t_hat, test_t, test_t_hat\n",
        "\n",
        "\n",
        "def extract_time (data):\n",
        "  \"\"\"Returns Maximum sequence length and each sequence length.\n",
        "  \n",
        "  Args:\n",
        "    - data: original data (indexable collection of 2-D arrays,\n",
        "      one (seq_len, dim) array per sample)\n",
        "    \n",
        "  Returns:\n",
        "    - time: extracted time information (list of per-sample sequence lengths)\n",
        "    - max_seq_len: maximum sequence length\n",
        "  \"\"\"\n",
        "  time = list()\n",
        "  max_seq_len = 0\n",
        "  for i in range(len(data)):\n",
        "    # length of sample i = number of rows (time steps) in its first column\n",
        "    max_seq_len = max(max_seq_len, len(data[i][:,0]))\n",
        "    time.append(len(data[i][:,0]))\n",
        "    \n",
        "  return time, max_seq_len\n",
        "\n",
        "def random_generator (batch_size, z_dim, T_mb, max_seq_len):\n",
        "  \"\"\"Random vector generation.\n",
        "  \n",
        "  Args:\n",
        "    - batch_size: size of the random vector\n",
        "    - z_dim: dimension of random vector\n",
        "    - T_mb: time information for the random vector\n",
        "    - max_seq_len: maximum sequence length\n",
        "    \n",
        "  Returns:\n",
        "    - Z_mb: generated random vector\n",
        "  \"\"\"\n",
        "  Z_mb = list()\n",
        "  for i in range(batch_size):\n",
        "    temp = np.zeros([max_seq_len, z_dim])\n",
        "    temp_Z = np.random.uniform(0., 1, [T_mb[i], z_dim])\n",
        "    temp[:T_mb[i],:] = temp_Z\n",
        "    Z_mb.append(temp_Z)\n",
        "  return Z_mb\n",
        "\n",
        "\n",
        "def batch_generator(data, time, batch_size):\n",
        "  \"\"\"Mini-batch generator.\n",
        "  \n",
        "  Draws `batch_size` samples without replacement from a fresh random\n",
        "  permutation of the whole dataset.\n",
        "  \n",
        "  Args:\n",
        "    - data: time-series data\n",
        "    - time: time information\n",
        "    - batch_size: the number of samples in each batch\n",
        "    \n",
        "  Returns:\n",
        "    - X_mb: time-series data in each batch\n",
        "    - T_mb: time information in each batch\n",
        "  \"\"\"\n",
        "  no = len(data)\n",
        "  idx = np.random.permutation(no)\n",
        "  train_idx = idx[:batch_size]     \n",
        "            \n",
        "  X_mb = list(data[i] for i in train_idx)\n",
        "  T_mb = list(time[i] for i in train_idx)\n",
        "  \n",
        "  return X_mb, T_mb"
      ],
      "id": "ahJcCrzPQl-k",
      "execution_count": 19,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KsYQ9_Y1Qe70"
      },
      "source": [
        "def MinMaxScaler(data):\n",
        "  \"\"\"Min Max normalizer.\n",
        "  \n",
        "  Args:\n",
        "    - data: original data\n",
        "  \n",
        "  Returns:\n",
        "    - norm_data: data scaled to [0, 1] along axis 0\n",
        "  \"\"\"\n",
        "  # min/max taken along axis 0; the 1e-7 guards against division by zero\n",
        "  # for constant columns\n",
        "  numerator = data - np.min(data, 0)\n",
        "  denominator = np.max(data, 0) - np.min(data, 0)\n",
        "  norm_data = numerator / (denominator + 1e-7)\n",
        "  return norm_data\n",
        "\n",
        "\n",
        "def sine_data_generation (no, seq_len, dim):\n",
        "  \"\"\"Sine data generation.\n",
        "  \n",
        "  Args:\n",
        "    - no: the number of samples\n",
        "    - seq_len: sequence length of the time-series\n",
        "    - dim: feature dimensions\n",
        "    \n",
        "  Returns:\n",
        "    - data: generated data (list of `no` arrays of shape (seq_len, dim),\n",
        "      each value in [0, 1])\n",
        "  \"\"\"  \n",
        "  # Initialize the output\n",
        "  data = list()\n",
        "\n",
        "  # Generate sine data\n",
        "  for i in range(no):      \n",
        "    # Initialize each time-series\n",
        "    temp = list()\n",
        "    # For each feature\n",
        "    for k in range(dim):\n",
        "      # Randomly drawn frequency and phase\n",
        "      freq = np.random.uniform(0, 0.1)            \n",
        "      phase = np.random.uniform(0, 0.1)\n",
        "          \n",
        "      # Generate sine signal based on the drawn frequency and phase\n",
        "      temp_data = [np.sin(freq * j + phase) for j in range(seq_len)] \n",
        "      temp.append(temp_data)\n",
        "        \n",
        "    # Align row/column: features were built row-wise, transpose to (seq_len, dim)\n",
        "    temp = np.transpose(np.asarray(temp))        \n",
        "    # Normalize from [-1, 1] to [0, 1]\n",
        "    temp = (temp + 1)*0.5\n",
        "    # Stack the generated data\n",
        "    data.append(temp)\n",
        "                \n",
        "  return data\n",
        "    \n",
        "\n",
        "def real_data_loading (data_name, seq_len):\n",
        "  \"\"\"Load and preprocess real-world datasets.\n",
        "  \n",
        "  Args:\n",
        "    - data_name: stock or energy\n",
        "    - seq_len: sequence length\n",
        "    \n",
        "  Returns:\n",
        "    - data: preprocessed data (shuffled list of (seq_len, dim) windows).\n",
        "  \"\"\"  \n",
        "  assert data_name in ['stock','energy']\n",
        "  \n",
        "  # NOTE(review): relative paths -- the notebook must be run from a directory\n",
        "  # that contains data/stock_data.csv and data/energy_data.csv\n",
        "  if data_name == 'stock':\n",
        "    ori_data = np.loadtxt('data/stock_data.csv', delimiter = \",\",skiprows = 1)\n",
        "  elif data_name == 'energy':\n",
        "    ori_data = np.loadtxt('data/energy_data.csv', delimiter = \",\",skiprows = 1)\n",
        "        \n",
        "  # Flip the data to make chronological data\n",
        "  ori_data = ori_data[::-1]\n",
        "  # Normalize the data\n",
        "  ori_data = MinMaxScaler(ori_data)\n",
        "    \n",
        "  # Preprocess the dataset\n",
        "  temp_data = []    \n",
        "  # Cut data by sequence length (overlapping sliding windows, stride 1)\n",
        "  for i in range(0, len(ori_data) - seq_len):\n",
        "    _x = ori_data[i:i + seq_len]\n",
        "    temp_data.append(_x)\n",
        "        \n",
        "  # Mix the datasets (to make it similar to i.i.d)\n",
        "  idx = np.random.permutation(len(temp_data))    \n",
        "  data = []\n",
        "  for i in range(len(temp_data)):\n",
        "    data.append(temp_data[idx[i]])\n",
        "    \n",
        "  return data"
      ],
      "id": "KsYQ9_Y1Qe70",
      "execution_count": 20,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4e849290"
      },
      "source": [
        "Define Class for Module Construction"
      ],
      "id": "4e849290"
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "294d9abe"
      },
      "source": [
        "class Time_GAN_module(nn.Module):\n",
        "    \"\"\"\n",
        "    Class from which a module of the Time GAN Architecture can be constructed, \n",
        "    consisting of a n_layer stacked RNN layers and a fully connected layer\n",
        "    \n",
        "    input_size = dim of data (depending if module operates on latent or non-latent space)\n",
        "\n",
        "    Args:\n",
        "        input_size: feature dimension of the input sequences\n",
        "        output_size: output dimension of the final fully connected layer\n",
        "        hidden_dim: hidden size of the GRU\n",
        "        n_layers: number of stacked GRU layers\n",
        "        activation: callable applied to the FC output (default torch.sigmoid);\n",
        "            pass the nn.Identity *class* itself for a raw linear output\n",
        "    \"\"\"\n",
        "    def __init__(self, input_size, output_size, hidden_dim, n_layers, activation=torch.sigmoid):\n",
        "        super(Time_GAN_module, self).__init__()\n",
        "\n",
        "        # Parameters\n",
        "        self.hidden_dim = hidden_dim\n",
        "        self.n_layers = n_layers\n",
        "        self.sigma = activation  # output activation\n",
        "\n",
        "        #Defining the layers\n",
        "        # RNN Layer\n",
        "        self.rnn = nn.GRU(input_size, hidden_dim, n_layers, batch_first=True)   \n",
        "        # Fully connected layer\n",
        "        self.fc = nn.Linear(hidden_dim, output_size)\n",
        "        \n",
        "    def forward(self, x):\n",
        "        # x: (batch, seq_len, input_size) -- batch_first GRU input\n",
        "    \n",
        "            batch_size = x.size(0)\n",
        "\n",
        "            # Initializing hidden state for first input using method defined below\n",
        "            hidden = self.init_hidden(batch_size)\n",
        "\n",
        "            # Passing in the input and hidden state into the model and obtaining outputs\n",
        "            out, hidden = self.rnn(x, hidden)\n",
        "        \n",
        "            # Reshaping the outputs such that it can be fit into the fully connected layer.\n",
        "            # NOTE: result stays flattened as (batch*seq_len, output_size); callers\n",
        "            # reshape it back to (batch, seq_len, output_size) themselves.\n",
        "            out = out.contiguous().view(-1, self.hidden_dim)\n",
        "            out = self.fc(out)\n",
        "            \n",
        "            # NOTE(review): when activation is the nn.Identity class, only `out` is\n",
        "            # returned (no hidden state) -- asymmetric with the tuple return below;\n",
        "            # callers must unpack accordingly. `idendity` is a typo for `identity`.\n",
        "            if self.sigma == nn.Identity:\n",
        "                idendity = nn.Identity()\n",
        "                return idendity(out)\n",
        "                \n",
        "            out = self.sigma(out)\n",
        "            \n",
        "            # NOTE: hidden states are also computed in the paper implementation,\n",
        "            # but apparently not used there?\n",
        "            \n",
        "            return out, hidden\n",
        "    \n",
        "    def init_hidden(self, batch_size):\n",
        "        # This method generates the first hidden state of zeros which we'll use in the forward pass\n",
        "        # NOTE(review): despite the original comment about sending it to a device,\n",
        "        # the tensor is created on CPU and never moved here -- confirm for GPU use\n",
        "        hidden = torch.zeros(self.n_layers, batch_size, self.hidden_dim)\n",
        "        return hidden"
      ],
      "id": "294d9abe",
      "execution_count": 21,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "65ad1d5e"
      },
      "source": [
        "Parameters"
      ],
      "id": "65ad1d5e"
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6a2e3f85"
      },
      "source": [
        "input_size = 5   # number of features per time step (= dim below)\n",
        "output_size = 20\n",
        "hidden_dim = 20  # latent (embedding) dimension\n",
        "n_layers = 3     # stacked GRU layers per module\n",
        "gamma = 1\n",
        "\n",
        "no, seq_len, dim = 12800, 24, 5  # samples, sequence length, features\n",
        "\n",
        "batch_size = 128\n",
        "epoch = 100"
      ],
      "id": "6a2e3f85",
      "execution_count": 22,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "b35b74eb"
      },
      "source": [
        "Data Generation"
      ],
      "id": "b35b74eb"
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "2373fb48",
        "outputId": "00332056-d68c-4eb0-e28c-a2df411f706f"
      },
      "source": [
        "# Generate `no` sine samples of shape (seq_len, dim) and stack them into one\n",
        "# tensor. NOTE(review): np.min(list, 0) inside MinMaxScaler reduces over the\n",
        "# sample axis here, i.e. normalization is per (time step, feature) cell.\n",
        "data = sine_data_generation(no, seq_len, dim)\n",
        "data = MinMaxScaler(data)\n",
        "data = torch.Tensor(data)\n",
        "data.shape"
      ],
      "id": "2373fb48",
      "execution_count": 23,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "torch.Size([12800, 24, 5])"
            ]
          },
          "metadata": {},
          "execution_count": 23
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "90e9892c"
      },
      "source": [
        "Create Modules"
      ],
      "id": "90e9892c"
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nVra-i_jieoc"
      },
      "source": [
        "# embedder: num_layers = num_layers, fully_connected dim = hidden_dim\n",
        "# recovery: num_layers = num_layers, fully_connected dim = dim \n",
        "# generator: num layers = num_layers, fully_connected dim = hidden_dim\n",
        "# supervisor: num_layers = num_layers-1, fully_connected dim = hidden_dim\n",
        "# discriminator: num_layers = num_layers, fully_connected dim = 1"
      ],
      "id": "nVra-i_jieoc",
      "execution_count": 24,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NU73wgsajAtM"
      },
      "source": [
        ""
      ],
      "id": "NU73wgsajAtM",
      "execution_count": 24,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "db4e7203",
        "outputId": "ef492e6d-288a-4490-fed9-c1de8efa01e2"
      },
      "source": [
        "Embedder = Time_GAN_module(input_size=dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=n_layers)\n",
        "Embedder"
      ],
      "id": "db4e7203",
      "execution_count": 25,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Time_GAN_module(\n",
              "  (rnn): GRU(5, 20, num_layers=3, batch_first=True)\n",
              "  (fc): Linear(in_features=20, out_features=20, bias=True)\n",
              ")"
            ]
          },
          "metadata": {},
          "execution_count": 25
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "9edcdcda",
        "outputId": "229d16fe-0033-4aac-bc4b-b76259aab192"
      },
      "source": [
        "Recovery = Time_GAN_module(input_size=hidden_dim, output_size=dim, hidden_dim=hidden_dim, n_layers=n_layers)\n",
        "Recovery"
      ],
      "id": "9edcdcda",
      "execution_count": 26,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Time_GAN_module(\n",
              "  (rnn): GRU(20, 20, num_layers=3, batch_first=True)\n",
              "  (fc): Linear(in_features=20, out_features=5, bias=True)\n",
              ")"
            ]
          },
          "metadata": {},
          "execution_count": 26
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "700d9516",
        "outputId": "9cba92f8-a7d6-47af-fd4b-2bb8ba88ff2f"
      },
      "source": [
        "# NOTE(review): the generator's input size is dim (= 5), so the noise\n",
        "# dimension z_dim is implicitly assumed equal to dim -- confirm when calling\n",
        "# random_generator\n",
        "Generator = Time_GAN_module(input_size=dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=n_layers)\n",
        "Generator"
      ],
      "id": "700d9516",
      "execution_count": 27,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Time_GAN_module(\n",
              "  (rnn): GRU(5, 20, num_layers=3, batch_first=True)\n",
              "  (fc): Linear(in_features=20, out_features=20, bias=True)\n",
              ")"
            ]
          },
          "metadata": {},
          "execution_count": 27
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "771283fc",
        "outputId": "8d5fc441-0979-4088-99f6-5689a6fe8129"
      },
      "source": [
        "Supervisor = Time_GAN_module(input_size=hidden_dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=n_layers-1)\n",
        "Supervisor"
      ],
      "id": "771283fc",
      "execution_count": 28,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Time_GAN_module(\n",
              "  (rnn): GRU(20, 20, num_layers=2, batch_first=True)\n",
              "  (fc): Linear(in_features=20, out_features=20, bias=True)\n",
              ")"
            ]
          },
          "metadata": {},
          "execution_count": 28
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "03e296b8",
        "outputId": "ad523d22-e431-443b-a6e1-d46f9594b56b"
      },
      "source": [
        "# Passing the nn.Identity class makes forward() emit raw logits -- and, per\n",
        "# Time_GAN_module.forward, a single tensor instead of an (out, hidden) tuple\n",
        "Discriminator = Time_GAN_module(input_size=hidden_dim, output_size=1, hidden_dim=hidden_dim, n_layers=n_layers, \n",
        "                               activation=nn.Identity)\n",
        "Discriminator"
      ],
      "id": "03e296b8",
      "execution_count": 29,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Time_GAN_module(\n",
              "  (rnn): GRU(20, 20, num_layers=3, batch_first=True)\n",
              "  (fc): Linear(in_features=20, out_features=1, bias=True)\n",
              ")"
            ]
          },
          "metadata": {},
          "execution_count": 29
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "a27f997b"
      },
      "source": [
        "Create Optimizers"
      ],
      "id": "a27f997b"
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "c7aed175"
      },
      "source": [
        "# One Adam optimizer per module\n",
        "embedder_optimizer = optim.Adam(Embedder.parameters(), lr=0.001)\n",
        "recovery_optimizer = optim.Adam(Recovery.parameters(), lr=0.001)\n",
        "# BUG FIX: this optimizer was built over Recovery.parameters(), so\n",
        "# supervisor_optimizer.step() never updated the Supervisor's weights\n",
        "supervisor_optimizer = optim.Adam(Supervisor.parameters(), lr=0.001)\n",
        "discriminator_optimizer = optim.Adam(Discriminator.parameters(), lr=0.001)\n",
        "generator_optimizer = optim.Adam(Generator.parameters(), lr=0.001)"
      ],
      "id": "c7aed175",
      "execution_count": 30,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "a05ad143"
      },
      "source": [
        "Data Loader"
      ],
      "id": "a05ad143"
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "f04458be",
        "outputId": "e1153625-f0ed-4135-c3de-5ea2b1918fd1"
      },
      "source": [
        "# DataLoader yields (batch_size, seq_len, dim) batches; the Embedder returns\n",
        "# a flattened (batch*seq_len, hidden_dim) output that must be reshaped back\n",
        "loader = DataLoader(data, batch_size, shuffle=True)\n",
        "X = next(iter(loader))\n",
        "H, _ = Embedder(X.float())\n",
        "H_re = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "H.shape, H_re.shape"
      ],
      "id": "f04458be",
      "execution_count": 31,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(torch.Size([3072, 20]), torch.Size([128, 24, 20]))"
            ]
          },
          "metadata": {},
          "execution_count": 31
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "xVFi9yxqeDqL",
        "outputId": "505cf277-e768-445b-c616-7fc03eeb8c3a"
      },
      "source": [
        "batch_size, seq_len, hidden_dim"
      ],
      "id": "xVFi9yxqeDqL",
      "execution_count": 32,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(128, 24, 20)"
            ]
          },
          "metadata": {},
          "execution_count": 32
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "bcfd5039"
      },
      "source": [
        "Embedder Training"
      ],
      "id": "bcfd5039"
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "bd9274ab",
        "outputId": "4483b81a-8fb2-4fd5-f8ae-51b26d70609b"
      },
      "source": [
        "print('Start Embedding Network Training')\n",
        "\n",
        "# Autoencoder pre-training: Embedder -> latent H -> Recovery -> X_tilde,\n",
        "# minimizing the reconstruction loss E_loss0\n",
        "for e in range(epoch): \n",
        "    for batch_index, X in enumerate(loader):\n",
        "        \n",
        "        # NOTE(review): re-instantiated every batch; could live outside the loops\n",
        "        MSE_loss = nn.MSELoss()\n",
        "        \n",
        "        H, _ = Embedder(X.float())\n",
        "        H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        X_tilde, _ = Recovery(H)\n",
        "        X_tilde = torch.reshape(X_tilde, (batch_size, seq_len, dim))\n",
        "\n",
        "        # 10 * sqrt(MSE), as in the TimeGAN reference implementation\n",
        "        E_loss0 = 10 * torch.sqrt(MSE_loss(X, X_tilde))  \n",
        "\n",
        "        Embedder.zero_grad()\n",
        "        Recovery.zero_grad()\n",
        "\n",
        "        E_loss0.backward(retain_graph=True)\n",
        "\n",
        "        embedder_optimizer.step()\n",
        "        recovery_optimizer.step()\n",
        "\n",
        "        # Log once per epoch (first batch); `e in range(1,epoch)` just skips e == 0.\n",
        "        # NOTE(review): np.sqrt is applied to a loss that already contains a sqrt,\n",
        "        # so the printed number is not E_loss0 itself\n",
        "        if e in range(1,epoch) and batch_index == 0:\n",
        "            print('step: '+ str(e) + '/' + str(epoch) + ', e_loss: ' + str(np.sqrt(E_loss0.detach().numpy())))\n",
        "\n",
        "print('Finish Embedding Network Training')"
      ],
      "id": "bd9274ab",
      "execution_count": 33,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Start Embedding Network Training\n",
            "step: 1/100, e_loss: 1.6197354\n",
            "step: 2/100, e_loss: 1.4922516\n",
            "step: 3/100, e_loss: 1.3177843\n",
            "step: 4/100, e_loss: 1.3521059\n",
            "step: 5/100, e_loss: 1.2815592\n",
            "step: 6/100, e_loss: 1.3027968\n",
            "step: 7/100, e_loss: 1.2591631\n",
            "step: 8/100, e_loss: 1.32466\n",
            "step: 9/100, e_loss: 1.1807969\n",
            "step: 10/100, e_loss: 1.1071249\n",
            "step: 11/100, e_loss: 1.0591449\n",
            "step: 12/100, e_loss: 1.0917394\n",
            "step: 13/100, e_loss: 0.9606062\n",
            "step: 14/100, e_loss: 0.99057525\n",
            "step: 15/100, e_loss: 0.9419938\n",
            "step: 16/100, e_loss: 0.69091374\n",
            "step: 17/100, e_loss: 0.6154215\n",
            "step: 18/100, e_loss: 0.57664984\n",
            "step: 19/100, e_loss: 0.54816735\n",
            "step: 20/100, e_loss: 0.5283633\n",
            "step: 21/100, e_loss: 0.49599373\n",
            "step: 22/100, e_loss: 0.4994968\n",
            "step: 23/100, e_loss: 0.48997918\n",
            "step: 24/100, e_loss: 0.483458\n",
            "step: 25/100, e_loss: 0.4680167\n",
            "step: 26/100, e_loss: 0.46107087\n",
            "step: 27/100, e_loss: 0.4470878\n",
            "step: 28/100, e_loss: 0.4443866\n",
            "step: 29/100, e_loss: 0.43648893\n",
            "step: 30/100, e_loss: 0.43595234\n",
            "step: 31/100, e_loss: 0.42352912\n",
            "step: 32/100, e_loss: 0.41141066\n",
            "step: 33/100, e_loss: 0.41288385\n",
            "step: 34/100, e_loss: 0.42006195\n",
            "step: 35/100, e_loss: 0.40695882\n",
            "step: 36/100, e_loss: 0.3980866\n",
            "step: 37/100, e_loss: 0.40419948\n",
            "step: 38/100, e_loss: 0.3982579\n",
            "step: 39/100, e_loss: 0.39740038\n",
            "step: 40/100, e_loss: 0.38357496\n",
            "step: 41/100, e_loss: 0.38549003\n",
            "step: 42/100, e_loss: 0.3789971\n",
            "step: 43/100, e_loss: 0.37422812\n",
            "step: 44/100, e_loss: 0.37577742\n",
            "step: 45/100, e_loss: 0.3768609\n",
            "step: 46/100, e_loss: 0.36533457\n",
            "step: 47/100, e_loss: 0.36717245\n",
            "step: 48/100, e_loss: 0.3723603\n",
            "step: 49/100, e_loss: 0.37215614\n",
            "step: 50/100, e_loss: 0.36131316\n",
            "step: 51/100, e_loss: 0.3629664\n",
            "step: 52/100, e_loss: 0.36041358\n",
            "step: 53/100, e_loss: 0.36171204\n",
            "step: 54/100, e_loss: 0.35787064\n",
            "step: 55/100, e_loss: 0.35513204\n",
            "step: 56/100, e_loss: 0.3591145\n",
            "step: 57/100, e_loss: 0.36372298\n",
            "step: 58/100, e_loss: 0.35741308\n",
            "step: 59/100, e_loss: 0.3580121\n",
            "step: 60/100, e_loss: 0.3625313\n",
            "step: 61/100, e_loss: 0.3493329\n",
            "step: 62/100, e_loss: 0.34981576\n",
            "step: 63/100, e_loss: 0.36481488\n",
            "step: 64/100, e_loss: 0.34025037\n",
            "step: 65/100, e_loss: 0.34612706\n",
            "step: 66/100, e_loss: 0.3519843\n",
            "step: 67/100, e_loss: 0.3341495\n",
            "step: 68/100, e_loss: 0.34399113\n",
            "step: 69/100, e_loss: 0.3379365\n",
            "step: 70/100, e_loss: 0.34603208\n",
            "step: 71/100, e_loss: 0.34137547\n",
            "step: 72/100, e_loss: 0.33728257\n",
            "step: 73/100, e_loss: 0.33899188\n",
            "step: 74/100, e_loss: 0.34081778\n",
            "step: 75/100, e_loss: 0.33216956\n",
            "step: 76/100, e_loss: 0.33230594\n",
            "step: 77/100, e_loss: 0.33755895\n",
            "step: 78/100, e_loss: 0.331658\n",
            "step: 79/100, e_loss: 0.32833588\n",
            "step: 80/100, e_loss: 0.3335021\n",
            "step: 81/100, e_loss: 0.32693714\n",
            "step: 82/100, e_loss: 0.32645646\n",
            "step: 83/100, e_loss: 0.33430612\n",
            "step: 84/100, e_loss: 0.3396664\n",
            "step: 85/100, e_loss: 0.326918\n",
            "step: 86/100, e_loss: 0.32817754\n",
            "step: 87/100, e_loss: 0.32903758\n",
            "step: 88/100, e_loss: 0.3202294\n",
            "step: 89/100, e_loss: 0.31766555\n",
            "step: 90/100, e_loss: 0.32731092\n",
            "step: 91/100, e_loss: 0.32451892\n",
            "step: 92/100, e_loss: 0.32468024\n",
            "step: 93/100, e_loss: 0.3230689\n",
            "step: 94/100, e_loss: 0.32423282\n",
            "step: 95/100, e_loss: 0.3112052\n",
            "step: 96/100, e_loss: 0.32525873\n",
            "step: 97/100, e_loss: 0.324771\n",
            "step: 98/100, e_loss: 0.3269967\n",
            "step: 99/100, e_loss: 0.3170213\n",
            "Finish Embedding Network Training\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "f1634b07"
      },
      "source": [
        "Training with supervised Loss"
      ],
      "id": "f1634b07"
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "c75a988f",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "c7c3014a-aa5b-48af-82fa-8e186a4566ce"
      },
      "source": [
        "print('Start Training with Supervised Loss Only')\n",
        "\n",
        "# BUG FIX (hidden state): this cell previously used the MSE_loss instance that\n",
        "# leaked out of the embedder-training loop above; define it here so the cell\n",
        "# is self-contained and survives Restart & Run All reordering.\n",
        "MSE_loss = nn.MSELoss()\n",
        "\n",
        "for e in range(epoch): \n",
        "    for batch_index, X in enumerate(loader):\n",
        "\n",
        "        # Embed the real sequences, reshaping the flattened Embedder output\n",
        "        # back to (batch, seq_len, hidden_dim)\n",
        "        H, _ = Embedder(X.float())\n",
        "        H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        H_hat_supervise, _ = Supervisor(H)\n",
        "        H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))  \n",
        "\n",
        "        # Supervised loss: the supervisor should predict the next latent step\n",
        "        G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "\n",
        "\n",
        "        Embedder.zero_grad()\n",
        "        Supervisor.zero_grad()\n",
        "\n",
        "        G_loss_S.backward(retain_graph=True)\n",
        "\n",
        "        embedder_optimizer.step()\n",
        "        supervisor_optimizer.step()\n",
        "\n",
        "        # Log once per epoch (first batch), skipping epoch 0\n",
        "        if e in range(1,epoch) and batch_index == 0:\n",
        "            print('step: '+ str(e) + '/' + str(epoch) + ', s_loss: ' + str(np.sqrt(G_loss_S.detach().numpy())))\n",
        "\n",
        "print('Finish Training with Supervised Loss Only')"
      ],
      "id": "c75a988f",
      "execution_count": 34,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Start Training with Supervised Loss Only\n",
            "step: 1/100, s_loss: 0.10398547\n",
            "step: 2/100, s_loss: 0.09963137\n",
            "step: 3/100, s_loss: 0.095322266\n",
            "step: 4/100, s_loss: 0.088691786\n",
            "step: 5/100, s_loss: 0.08786065\n",
            "step: 6/100, s_loss: 0.08571244\n",
            "step: 7/100, s_loss: 0.07921417\n",
            "step: 8/100, s_loss: 0.07805923\n",
            "step: 9/100, s_loss: 0.07324201\n",
            "step: 10/100, s_loss: 0.070104145\n",
            "step: 11/100, s_loss: 0.06643666\n",
            "step: 12/100, s_loss: 0.061148997\n",
            "step: 13/100, s_loss: 0.059470728\n",
            "step: 14/100, s_loss: 0.057204135\n",
            "step: 15/100, s_loss: 0.05500602\n",
            "step: 16/100, s_loss: 0.0514243\n",
            "step: 17/100, s_loss: 0.047650483\n",
            "step: 18/100, s_loss: 0.046424177\n",
            "step: 19/100, s_loss: 0.043680355\n",
            "step: 20/100, s_loss: 0.042070862\n",
            "step: 21/100, s_loss: 0.03882782\n",
            "step: 22/100, s_loss: 0.03869168\n",
            "step: 23/100, s_loss: 0.037388377\n",
            "step: 24/100, s_loss: 0.035496406\n",
            "step: 25/100, s_loss: 0.033455666\n",
            "step: 26/100, s_loss: 0.031500757\n",
            "step: 27/100, s_loss: 0.032185104\n",
            "step: 28/100, s_loss: 0.02985961\n",
            "step: 29/100, s_loss: 0.027589733\n",
            "step: 30/100, s_loss: 0.027020816\n",
            "step: 31/100, s_loss: 0.026489066\n",
            "step: 32/100, s_loss: 0.02532191\n",
            "step: 33/100, s_loss: 0.025203917\n",
            "step: 34/100, s_loss: 0.023369813\n",
            "step: 35/100, s_loss: 0.023195757\n",
            "step: 36/100, s_loss: 0.022479197\n",
            "step: 37/100, s_loss: 0.020846847\n",
            "step: 38/100, s_loss: 0.020704929\n",
            "step: 39/100, s_loss: 0.019366894\n",
            "step: 40/100, s_loss: 0.01937874\n",
            "step: 41/100, s_loss: 0.017930213\n",
            "step: 42/100, s_loss: 0.018117135\n",
            "step: 43/100, s_loss: 0.017529063\n",
            "step: 44/100, s_loss: 0.017066145\n",
            "step: 45/100, s_loss: 0.016003976\n",
            "step: 46/100, s_loss: 0.015619485\n",
            "step: 47/100, s_loss: 0.015053974\n",
            "step: 48/100, s_loss: 0.0141663775\n",
            "step: 49/100, s_loss: 0.013864631\n",
            "step: 50/100, s_loss: 0.013511238\n",
            "step: 51/100, s_loss: 0.013354129\n",
            "step: 52/100, s_loss: 0.013005076\n",
            "step: 53/100, s_loss: 0.012327996\n",
            "step: 54/100, s_loss: 0.012217128\n",
            "step: 55/100, s_loss: 0.011790587\n",
            "step: 56/100, s_loss: 0.011739324\n",
            "step: 57/100, s_loss: 0.011139704\n",
            "step: 58/100, s_loss: 0.010902156\n",
            "step: 59/100, s_loss: 0.010740873\n",
            "step: 60/100, s_loss: 0.010304184\n",
            "step: 61/100, s_loss: 0.010042758\n",
            "step: 62/100, s_loss: 0.009779748\n",
            "step: 63/100, s_loss: 0.009606998\n",
            "step: 64/100, s_loss: 0.009279513\n",
            "step: 65/100, s_loss: 0.008830949\n",
            "step: 66/100, s_loss: 0.008803555\n",
            "step: 67/100, s_loss: 0.008457704\n",
            "step: 68/100, s_loss: 0.008262992\n",
            "step: 69/100, s_loss: 0.008220219\n",
            "step: 70/100, s_loss: 0.0080046095\n",
            "step: 71/100, s_loss: 0.0076677124\n",
            "step: 72/100, s_loss: 0.007321903\n",
            "step: 73/100, s_loss: 0.0071835564\n",
            "step: 74/100, s_loss: 0.007262012\n",
            "step: 75/100, s_loss: 0.0067828116\n",
            "step: 76/100, s_loss: 0.006688265\n",
            "step: 77/100, s_loss: 0.0065214965\n",
            "step: 78/100, s_loss: 0.0064324373\n",
            "step: 79/100, s_loss: 0.006348527\n",
            "step: 80/100, s_loss: 0.006188889\n",
            "step: 81/100, s_loss: 0.0059971544\n",
            "step: 82/100, s_loss: 0.0059136297\n",
            "step: 83/100, s_loss: 0.0057822186\n",
            "step: 84/100, s_loss: 0.0055244192\n",
            "step: 85/100, s_loss: 0.0054707527\n",
            "step: 86/100, s_loss: 0.005264914\n",
            "step: 87/100, s_loss: 0.005288865\n",
            "step: 88/100, s_loss: 0.00532352\n",
            "step: 89/100, s_loss: 0.005018379\n",
            "step: 90/100, s_loss: 0.0047735334\n",
            "step: 91/100, s_loss: 0.004742968\n",
            "step: 92/100, s_loss: 0.004608948\n",
            "step: 93/100, s_loss: 0.0044726348\n",
            "step: 94/100, s_loss: 0.004422063\n",
            "step: 95/100, s_loss: 0.0042428947\n",
            "step: 96/100, s_loss: 0.004176508\n",
            "step: 97/100, s_loss: 0.0039801234\n",
            "step: 98/100, s_loss: 0.003990452\n",
            "step: 99/100, s_loss: 0.0037792262\n",
            "Finish Training with Supervised Loss Only\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1040c2c0"
      },
      "source": [
        "# NOTE(review): overrides the earlier epoch setting; presumably a short\n",
        "# run for the standalone joint-training cells below -- confirm intent\n",
        "epoch = 2"
      ],
      "id": "1040c2c0",
      "execution_count": 35,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9e2cd6ad"
      },
      "source": [
        "# presumably extract_time returns (per-sequence lengths, max length);\n",
        "# call it once instead of twice -- TODO confirm it is side-effect free\n",
        "times, max_len = extract_time(data)\n",
        "random_data = random_generator(batch_size=batch_size, z_dim=dim,\n",
        "                               T_mb=times, max_seq_len=max_len)"
      ],
      "id": "9e2cd6ad",
      "execution_count": 36,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "627dd17f"
      },
      "source": [
        "# Data loaders over the real data and the random (noise) data.\n",
        "loader = DataLoader(data, batch_size=batch_size, shuffle=True)\n",
        "random_loader = DataLoader(random_data, batch_size=batch_size, shuffle=True)\n",
        "\n",
        "# Shared loss functions used by the training cells below.\n",
        "binary_cross_entropy_loss = nn.BCEWithLogitsLoss()\n",
        "MSE_loss = nn.MSELoss()"
      ],
      "id": "627dd17f",
      "execution_count": 37,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "3i8UwTo2kuXi",
        "outputId": "8c39f349-3db7-4616-ce01-ec79f8d7505e"
      },
      "source": [
        "# pull one batch to sanity-check the loader's output shape\n",
        "X = next(iter(loader))\n",
        "X.shape"
      ],
      "id": "3i8UwTo2kuXi",
      "execution_count": 38,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "torch.Size([128, 24, 5])"
            ]
          },
          "metadata": {},
          "execution_count": 38
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GFKdBugWo9OM"
      },
      "source": [
        "def TimeGAN(data, parameters):\n",
        "  \"\"\"Three-stage TimeGAN training: embedding, supervised-only, then joint.\n",
        "\n",
        "  NOTE(review): depends on notebook globals defined elsewhere\n",
        "  (Time_GAN_module, loader, binary_cross_entropy_loss, random_generator,\n",
        "  extract_time) -- confirm they are in scope before calling.\n",
        "\n",
        "  Args:\n",
        "      data: array-like of shape (no, seq_len, dim) holding the sequences.\n",
        "      parameters: dict with keys hidden_dim, num_layers, iterations,\n",
        "          batch_size, module and epoch (iterations and module are\n",
        "          currently unused).\n",
        "  \"\"\"\n",
        "  hidden_dim = parameters[\"hidden_dim\"]\n",
        "  num_layers = parameters[\"num_layers\"]\n",
        "  iterations = parameters[\"iterations\"]  # unused for now\n",
        "  batch_size = parameters[\"batch_size\"]\n",
        "  module = parameters[\"module\"]  # unused for now\n",
        "  epoch = parameters[\"epoch\"]\n",
        "  no, seq_len, dim = np.asarray(data).shape\n",
        "  z_dim = dim\n",
        "  gamma = 1\n",
        "\n",
        "  # BUGFIX: the constructors below referenced the undefined name n_layers;\n",
        "  # they now use num_layers from the parameters dict.\n",
        "  Embedder = Time_GAN_module(input_size=z_dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=num_layers)\n",
        "  Recovery = Time_GAN_module(input_size=hidden_dim, output_size=dim, hidden_dim=hidden_dim, n_layers=num_layers)\n",
        "  Generator = Time_GAN_module(input_size=dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=num_layers)\n",
        "  Supervisor = Time_GAN_module(input_size=hidden_dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=num_layers-1)\n",
        "  Discriminator = Time_GAN_module(input_size=hidden_dim, output_size=1, hidden_dim=hidden_dim, n_layers=num_layers, activation=nn.Identity)\n",
        "\n",
        "  embedder_optimizer = optim.Adam(Embedder.parameters(), lr=0.001)\n",
        "  recovery_optimizer = optim.Adam(Recovery.parameters(), lr=0.001)\n",
        "  # BUGFIX: this optimizer was built over Recovery.parameters(), so the\n",
        "  # Supervisor never received updates from its own optimizer.\n",
        "  supervisor_optimizer = optim.Adam(Supervisor.parameters(), lr=0.001)\n",
        "  discriminator_optimizer = optim.Adam(Discriminator.parameters(), lr=0.001)\n",
        "  generator_optimizer = optim.Adam(Generator.parameters(), lr=0.001)\n",
        "\n",
        "  MSE_loss = nn.MSELoss()  # hoisted: no need to rebuild the loss every batch\n",
        "\n",
        "  # Embedding Network Training\n",
        "  print('Start Embedding Network Training')\n",
        "  for e in range(epoch):\n",
        "    for batch_index, X in enumerate(loader):\n",
        "\n",
        "        H, _ = Embedder(X.float())\n",
        "        H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        X_tilde, _ = Recovery(H)\n",
        "        X_tilde = torch.reshape(X_tilde, (batch_size, seq_len, dim))\n",
        "\n",
        "        # scaled reconstruction loss, as in the reference implementation\n",
        "        E_loss0 = 10 * torch.sqrt(MSE_loss(X, X_tilde))\n",
        "\n",
        "        Embedder.zero_grad()\n",
        "        Recovery.zero_grad()\n",
        "\n",
        "        E_loss0.backward(retain_graph=True)\n",
        "\n",
        "        embedder_optimizer.step()\n",
        "        recovery_optimizer.step()\n",
        "\n",
        "        if e >= 1 and batch_index == 0:\n",
        "            print('step: '+ str(e) + '/' + str(epoch) + ', e_loss: ' + str(np.sqrt(E_loss0.detach().numpy())))\n",
        "\n",
        "  print('Finish Embedding Network Training')\n",
        "\n",
        "  # Training only with supervised loss\n",
        "  print('Start Training with Supervised Loss Only')\n",
        "  for e in range(epoch):\n",
        "    for batch_index, X in enumerate(loader):\n",
        "\n",
        "        H, _ = Embedder(X.float())\n",
        "        H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        H_hat_supervise, _ = Supervisor(H)\n",
        "        H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        # one-step-ahead supervised loss in latent space\n",
        "        G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "\n",
        "        Embedder.zero_grad()\n",
        "        Supervisor.zero_grad()\n",
        "\n",
        "        G_loss_S.backward(retain_graph=True)\n",
        "\n",
        "        embedder_optimizer.step()\n",
        "        supervisor_optimizer.step()\n",
        "\n",
        "        if e >= 1 and batch_index == 0:\n",
        "            print('step: '+ str(e) + '/' + str(epoch) + ', s_loss: ' + str(np.sqrt(G_loss_S.detach().numpy())))\n",
        "\n",
        "  print('Finish Training with Supervised Loss Only')\n",
        "  # Joint Training\n",
        "  print('Start Joint Training')\n",
        "  for itt in range(epoch):\n",
        "    # generator/embedder are trained twice per discriminator pass\n",
        "    for kk in range(2):\n",
        "      X = next(iter(loader))\n",
        "      random_data = random_generator(batch_size=batch_size, z_dim=dim,\n",
        "                                       T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n",
        "\n",
        "      # Generator Training\n",
        "      ## Train Generator\n",
        "      z = torch.tensor(random_data)\n",
        "      z = z.float()\n",
        "\n",
        "      e_hat, _ = Generator(z)\n",
        "      e_hat = torch.reshape(e_hat, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      H_hat, _ = Supervisor(e_hat)\n",
        "      H_hat = torch.reshape(H_hat, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      # NOTE(review): Discriminator output is used without tuple unpacking,\n",
        "      # unlike Embedder/Recovery/Supervisor -- confirm Time_GAN_module's\n",
        "      # return convention\n",
        "      Y_fake = Discriminator(H_hat)\n",
        "      Y_fake = torch.reshape(Y_fake, (batch_size, seq_len, 1))\n",
        "\n",
        "      x_hat, _ = Recovery(H_hat)\n",
        "      x_hat = torch.reshape(x_hat, (batch_size, seq_len, dim))\n",
        "\n",
        "      H, _ = Embedder(X.float())\n",
        "      H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      H_hat_supervise, _ = Supervisor(H)\n",
        "      H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Generator.zero_grad()\n",
        "      Supervisor.zero_grad()\n",
        "      Discriminator.zero_grad()\n",
        "      Recovery.zero_grad()\n",
        "\n",
        "      # line 267 of original implementation:\n",
        "      # G_loss_U, G_loss_S, G_loss_V\n",
        "      G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "\n",
        "      # BUGFIX: BCEWithLogitsLoss takes (input, target); the logits Y_fake\n",
        "      # go first, the all-ones target second (the arguments were reversed).\n",
        "      G_loss_U = binary_cross_entropy_loss(Y_fake, torch.ones_like(Y_fake))\n",
        "\n",
        "      # moment-matching losses on std and mean of real vs. generated data\n",
        "      G_loss_V1 = torch.mean(torch.abs((torch.std(x_hat, [0], unbiased = False)) + 1e-6 - (torch.std(X, [0]) + 1e-6)))\n",
        "      G_loss_V2 = torch.mean(torch.abs((torch.mean(x_hat, [0]) - (torch.mean(X, [0])))))\n",
        "      G_loss_V = G_loss_V1 + G_loss_V2\n",
        "\n",
        "      # doing a backward step for each loss should result in gradients accumulating\n",
        "      # so we should be able to optimize them jointly\n",
        "      G_loss_S.backward(retain_graph=True)\n",
        "      G_loss_U.backward(retain_graph=True)\n",
        "      G_loss_V.backward(retain_graph=True)\n",
        "\n",
        "      generator_optimizer.step()\n",
        "      supervisor_optimizer.step()\n",
        "      discriminator_optimizer.step()\n",
        "\n",
        "      # Train Embedder\n",
        "      ## line 270: we only optimize E_loss_T0\n",
        "      ## E_loss_T0 = just mse of x and x_tilde\n",
        "      # but it calls E_solver which optimizes E_loss, which is a sum of\n",
        "      # E_loss0 and 0.1* G_loss_S\n",
        "      H, _ = Embedder(X.float())\n",
        "      H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      X_tilde, _ = Recovery(H)\n",
        "      X_tilde = torch.reshape(X_tilde, (batch_size, seq_len, dim))\n",
        "\n",
        "      E_loss_T0 = MSE_loss(X, X_tilde)\n",
        "      E_loss0 = 10 * torch.sqrt(MSE_loss(X, X_tilde))\n",
        "\n",
        "      H_hat_supervise, _ = Supervisor(H)\n",
        "      H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "      # NOTE(review): E_loss is computed but never back-propagated; kept for\n",
        "      # parity with the reference TF implementation\n",
        "      E_loss = E_loss0  + 0.1 * G_loss_S\n",
        "\n",
        "      # BUGFIX: zero_grad previously ran AFTER backward(), erasing the fresh\n",
        "      # gradients before the optimizer steps; clear the grads first instead.\n",
        "      Embedder.zero_grad()\n",
        "      Recovery.zero_grad()\n",
        "      Supervisor.zero_grad()\n",
        "\n",
        "      G_loss_S.backward(retain_graph=True)\n",
        "      E_loss_T0.backward()\n",
        "\n",
        "      embedder_optimizer.step()\n",
        "      recovery_optimizer.step()\n",
        "      supervisor_optimizer.step()\n",
        "    # train Discriminator\n",
        "    for batch_index, X in enumerate(loader):\n",
        "      random_data = random_generator(batch_size=batch_size, z_dim=dim,\n",
        "                                       T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n",
        "\n",
        "      z = torch.tensor(random_data)\n",
        "      z = z.float()\n",
        "\n",
        "      # BUGFIX: cast to float like every other Embedder call in this function\n",
        "      H, _ = Embedder(X.float())\n",
        "      H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Y_real = Discriminator(H)\n",
        "      Y_real = torch.reshape(Y_real, (batch_size, seq_len, 1))\n",
        "\n",
        "      e_hat, _ = Generator(z)\n",
        "      e_hat = torch.reshape(e_hat, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Y_fake_e = Discriminator(e_hat)\n",
        "      Y_fake_e = torch.reshape(Y_fake_e, (batch_size, seq_len, 1))\n",
        "\n",
        "      H_hat, _ = Supervisor(e_hat)\n",
        "      H_hat = torch.reshape(H_hat, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Y_fake = Discriminator(H_hat)\n",
        "      Y_fake = torch.reshape(Y_fake, (batch_size, seq_len, 1))\n",
        "\n",
        "      x_hat, _ = Recovery(H_hat)\n",
        "      x_hat = torch.reshape(x_hat, (batch_size, seq_len, dim))\n",
        "\n",
        "      Generator.zero_grad()\n",
        "      Supervisor.zero_grad()\n",
        "      Discriminator.zero_grad()\n",
        "      Recovery.zero_grad()\n",
        "      Embedder.zero_grad()\n",
        "\n",
        "      # logits first, then targets\n",
        "      D_loss_real = nn.BCEWithLogitsLoss()\n",
        "      DLR = D_loss_real(Y_real, torch.ones_like(Y_real))\n",
        "\n",
        "      D_loss_fake = nn.BCEWithLogitsLoss()\n",
        "      DLF = D_loss_fake(Y_fake, torch.zeros_like(Y_fake))\n",
        "\n",
        "      D_loss_fake_e = nn.BCEWithLogitsLoss()\n",
        "      DLF_e = D_loss_fake_e(Y_fake_e, torch.zeros_like(Y_fake_e))\n",
        "\n",
        "      D_loss = DLR + DLF + gamma * DLF_e\n",
        "\n",
        "      # only update the discriminator while its loss exceeds the threshold\n",
        "      check_d_loss = D_loss\n",
        "      if (check_d_loss > 0.15):\n",
        "        D_loss.backward(retain_graph=True)\n",
        "        discriminator_optimizer.step()\n",
        "\n",
        "      # BUGFIX: the progress line used the stale loop variable e from the\n",
        "      # earlier phases; report the joint-training iteration itt instead.\n",
        "      print('step: '+ str(itt) + '/' + str(epoch) +\n",
        "            ', D_loss: ' + str(D_loss.detach().numpy()) +\n",
        "            ', G_loss_U: ' + str(G_loss_U.detach().numpy()) +\n",
        "            ', G_loss_S: ' + str(G_loss_S.detach().numpy()) +\n",
        "            ', E_loss_t0: ' + str(np.sqrt(E_loss0.detach().numpy()))\n",
        "             )\n",
        "  print('Finish Joint Training')"
      ],
      "id": "GFKdBugWo9OM",
      "execution_count": 67,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "YPbbUH0dJqjt"
      },
      "source": [
        "parameters = dict()\n",
        "parameters['module'] = 'gru' \n",
        "parameters['hidden_dim'] = 24\n",
        "parameters['num_layers'] = 3\n",
        "parameters['iterations'] = 10000\n",
        "parameters['batch_size'] = 128\n",
        "parameters['epoch'] = 100"
      ],
      "id": "YPbbUH0dJqjt",
      "execution_count": 68,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9rnug7qqmddM",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        },
        "outputId": "aacf2bb0-7f70-496a-d98f-0316b540c67a"
      },
      "source": [
        "# run the full three-stage TimeGAN training defined above\n",
        "TimeGAN(data, parameters)"
      ],
      "id": "9rnug7qqmddM",
      "execution_count": 69,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Start Embedding Network Training\n",
            "step: 1/100, e_loss: 1.6255708\n",
            "step: 2/100, e_loss: 1.4654692\n",
            "step: 3/100, e_loss: 1.3650312\n",
            "step: 4/100, e_loss: 1.3015138\n",
            "step: 5/100, e_loss: 1.2590116\n",
            "step: 6/100, e_loss: 1.2514571\n",
            "step: 7/100, e_loss: 1.1545329\n",
            "step: 8/100, e_loss: 1.0581712\n",
            "step: 9/100, e_loss: 1.0139271\n",
            "step: 10/100, e_loss: 0.99184287\n",
            "step: 11/100, e_loss: 0.9825848\n",
            "step: 12/100, e_loss: 0.7539981\n",
            "step: 13/100, e_loss: 0.64375967\n",
            "step: 14/100, e_loss: 0.57008266\n",
            "step: 15/100, e_loss: 0.5210716\n",
            "step: 16/100, e_loss: 0.4996004\n",
            "step: 17/100, e_loss: 0.4849279\n",
            "step: 18/100, e_loss: 0.46555084\n",
            "step: 19/100, e_loss: 0.47098878\n",
            "step: 20/100, e_loss: 0.44210142\n",
            "step: 21/100, e_loss: 0.44026056\n",
            "step: 22/100, e_loss: 0.42776528\n",
            "step: 23/100, e_loss: 0.42436484\n",
            "step: 24/100, e_loss: 0.40902647\n",
            "step: 25/100, e_loss: 0.4068279\n",
            "step: 26/100, e_loss: 0.3989191\n",
            "step: 27/100, e_loss: 0.39009485\n",
            "step: 28/100, e_loss: 0.3912966\n",
            "step: 29/100, e_loss: 0.40551588\n",
            "step: 30/100, e_loss: 0.40117383\n",
            "step: 31/100, e_loss: 0.37779817\n",
            "step: 32/100, e_loss: 0.37092173\n",
            "step: 33/100, e_loss: 0.36891732\n",
            "step: 34/100, e_loss: 0.37068486\n",
            "step: 35/100, e_loss: 0.3642721\n",
            "step: 36/100, e_loss: 0.36452585\n",
            "step: 37/100, e_loss: 0.36731678\n",
            "step: 38/100, e_loss: 0.35553682\n",
            "step: 39/100, e_loss: 0.35021752\n",
            "step: 40/100, e_loss: 0.356307\n",
            "step: 41/100, e_loss: 0.3486393\n",
            "step: 42/100, e_loss: 0.3525145\n",
            "step: 43/100, e_loss: 0.3462555\n",
            "step: 44/100, e_loss: 0.35117\n",
            "step: 45/100, e_loss: 0.3481897\n",
            "step: 46/100, e_loss: 0.34308854\n",
            "step: 47/100, e_loss: 0.34361547\n",
            "step: 48/100, e_loss: 0.34259376\n",
            "step: 49/100, e_loss: 0.34422442\n",
            "step: 50/100, e_loss: 0.340204\n",
            "step: 51/100, e_loss: 0.3377795\n",
            "step: 52/100, e_loss: 0.33565566\n",
            "step: 53/100, e_loss: 0.33490655\n",
            "step: 54/100, e_loss: 0.3233081\n",
            "step: 55/100, e_loss: 0.32658336\n",
            "step: 56/100, e_loss: 0.3318858\n",
            "step: 57/100, e_loss: 0.32575113\n",
            "step: 58/100, e_loss: 0.3337588\n",
            "step: 59/100, e_loss: 0.32782155\n",
            "step: 60/100, e_loss: 0.32561427\n",
            "step: 61/100, e_loss: 0.3319422\n",
            "step: 62/100, e_loss: 0.3205207\n",
            "step: 63/100, e_loss: 0.32429916\n",
            "step: 64/100, e_loss: 0.3174126\n",
            "step: 65/100, e_loss: 0.31494224\n",
            "step: 66/100, e_loss: 0.33052573\n",
            "step: 67/100, e_loss: 0.3198562\n",
            "step: 68/100, e_loss: 0.31666967\n",
            "step: 69/100, e_loss: 0.31990707\n",
            "step: 70/100, e_loss: 0.32087556\n",
            "step: 71/100, e_loss: 0.33296967\n",
            "step: 72/100, e_loss: 0.31420094\n",
            "step: 73/100, e_loss: 0.30547455\n",
            "step: 74/100, e_loss: 0.3107972\n",
            "step: 75/100, e_loss: 0.31390318\n",
            "step: 76/100, e_loss: 0.31027827\n",
            "step: 77/100, e_loss: 0.31833404\n",
            "step: 78/100, e_loss: 0.32145333\n",
            "step: 79/100, e_loss: 0.30299264\n",
            "step: 80/100, e_loss: 0.30019253\n",
            "step: 81/100, e_loss: 0.30475143\n",
            "step: 82/100, e_loss: 0.29968968\n",
            "step: 83/100, e_loss: 0.30375203\n",
            "step: 84/100, e_loss: 0.31921643\n",
            "step: 85/100, e_loss: 0.3041509\n",
            "step: 86/100, e_loss: 0.29824057\n",
            "step: 87/100, e_loss: 0.30622655\n",
            "step: 88/100, e_loss: 0.3111339\n",
            "step: 89/100, e_loss: 0.29433203\n",
            "step: 90/100, e_loss: 0.30471846\n",
            "step: 91/100, e_loss: 0.29213133\n",
            "step: 92/100, e_loss: 0.30407855\n",
            "step: 93/100, e_loss: 0.29241523\n",
            "step: 94/100, e_loss: 0.29821935\n",
            "step: 95/100, e_loss: 0.29214948\n",
            "step: 96/100, e_loss: 0.30263755\n",
            "step: 97/100, e_loss: 0.28806764\n",
            "step: 98/100, e_loss: 0.29023036\n",
            "step: 99/100, e_loss: 0.28032625\n",
            "Finish Embedding Network Training\n",
            "Start Training with Supervised Loss Only\n",
            "step: 1/100, s_loss: 0.14094429\n",
            "step: 2/100, s_loss: 0.13612333\n",
            "step: 3/100, s_loss: 0.12686777\n",
            "step: 4/100, s_loss: 0.12085825\n",
            "step: 5/100, s_loss: 0.114712566\n",
            "step: 6/100, s_loss: 0.107571416\n",
            "step: 7/100, s_loss: 0.099886514\n",
            "step: 8/100, s_loss: 0.09885142\n",
            "step: 9/100, s_loss: 0.09205896\n",
            "step: 10/100, s_loss: 0.084691904\n",
            "step: 11/100, s_loss: 0.08208521\n",
            "step: 12/100, s_loss: 0.073995784\n",
            "step: 13/100, s_loss: 0.07186774\n",
            "step: 14/100, s_loss: 0.06840574\n",
            "step: 15/100, s_loss: 0.06317052\n",
            "step: 16/100, s_loss: 0.060345147\n",
            "step: 17/100, s_loss: 0.056501504\n",
            "step: 18/100, s_loss: 0.05088914\n",
            "step: 19/100, s_loss: 0.05156184\n",
            "step: 20/100, s_loss: 0.046830647\n",
            "step: 21/100, s_loss: 0.044565823\n",
            "step: 22/100, s_loss: 0.043205667\n",
            "step: 23/100, s_loss: 0.042011473\n",
            "step: 24/100, s_loss: 0.040791966\n",
            "step: 25/100, s_loss: 0.037139036\n",
            "step: 26/100, s_loss: 0.035987847\n",
            "step: 27/100, s_loss: 0.03367852\n",
            "step: 28/100, s_loss: 0.03225383\n",
            "step: 29/100, s_loss: 0.030659817\n",
            "step: 30/100, s_loss: 0.030288778\n",
            "step: 31/100, s_loss: 0.027041959\n",
            "step: 32/100, s_loss: 0.026235413\n",
            "step: 33/100, s_loss: 0.025559144\n",
            "step: 34/100, s_loss: 0.02413858\n",
            "step: 35/100, s_loss: 0.023704547\n",
            "step: 36/100, s_loss: 0.023290109\n",
            "step: 37/100, s_loss: 0.02127419\n",
            "step: 38/100, s_loss: 0.020631742\n",
            "step: 39/100, s_loss: 0.018656438\n",
            "step: 40/100, s_loss: 0.018178578\n",
            "step: 41/100, s_loss: 0.017698463\n",
            "step: 42/100, s_loss: 0.01694124\n",
            "step: 43/100, s_loss: 0.015725996\n",
            "step: 44/100, s_loss: 0.01548548\n",
            "step: 45/100, s_loss: 0.015128563\n",
            "step: 46/100, s_loss: 0.014702286\n",
            "step: 47/100, s_loss: 0.013889188\n",
            "step: 48/100, s_loss: 0.013980933\n",
            "step: 49/100, s_loss: 0.012963483\n",
            "step: 50/100, s_loss: 0.012958267\n",
            "step: 51/100, s_loss: 0.012150721\n",
            "step: 52/100, s_loss: 0.012551236\n",
            "step: 53/100, s_loss: 0.011707248\n",
            "step: 54/100, s_loss: 0.011312173\n",
            "step: 55/100, s_loss: 0.011246828\n",
            "step: 56/100, s_loss: 0.011356784\n",
            "step: 57/100, s_loss: 0.01023017\n",
            "step: 58/100, s_loss: 0.009928182\n",
            "step: 59/100, s_loss: 0.0097968215\n",
            "step: 60/100, s_loss: 0.009761779\n",
            "step: 61/100, s_loss: 0.009007847\n",
            "step: 62/100, s_loss: 0.008583456\n",
            "step: 63/100, s_loss: 0.008489521\n",
            "step: 64/100, s_loss: 0.0080841845\n",
            "step: 65/100, s_loss: 0.008315491\n",
            "step: 66/100, s_loss: 0.007948001\n",
            "step: 67/100, s_loss: 0.007786626\n",
            "step: 68/100, s_loss: 0.007605787\n",
            "step: 69/100, s_loss: 0.0075546075\n",
            "step: 70/100, s_loss: 0.0071688592\n",
            "step: 71/100, s_loss: 0.006823047\n",
            "step: 72/100, s_loss: 0.0066865915\n",
            "step: 73/100, s_loss: 0.006511423\n",
            "step: 74/100, s_loss: 0.006135963\n",
            "step: 75/100, s_loss: 0.0061216177\n",
            "step: 76/100, s_loss: 0.00597527\n",
            "step: 77/100, s_loss: 0.005794568\n",
            "step: 78/100, s_loss: 0.0057007633\n",
            "step: 79/100, s_loss: 0.0055359844\n",
            "step: 80/100, s_loss: 0.0053790146\n",
            "step: 81/100, s_loss: 0.0050867484\n",
            "step: 82/100, s_loss: 0.004976589\n",
            "step: 83/100, s_loss: 0.0048336475\n",
            "step: 84/100, s_loss: 0.004774632\n",
            "step: 85/100, s_loss: 0.0046271267\n",
            "step: 86/100, s_loss: 0.0045652576\n",
            "step: 87/100, s_loss: 0.004357091\n",
            "step: 88/100, s_loss: 0.0042285863\n",
            "step: 89/100, s_loss: 0.0041273013\n",
            "step: 90/100, s_loss: 0.0042855204\n",
            "step: 91/100, s_loss: 0.004083606\n",
            "step: 92/100, s_loss: 0.0037895853\n",
            "step: 93/100, s_loss: 0.0038372877\n",
            "step: 94/100, s_loss: 0.0036501114\n",
            "step: 95/100, s_loss: 0.0035447334\n",
            "step: 96/100, s_loss: 0.0034077947\n",
            "step: 97/100, s_loss: 0.0033150075\n",
            "step: 98/100, s_loss: 0.0032708247\n",
            "step: 99/100, s_loss: 0.0032004265\n",
            "Finish Training with Supervised Loss Only\n",
            "Start Joint Training\n",
            "step: 99/100, D_loss: 2.1981056, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.2074325, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.2091365, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.206072, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.2000103, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1919599, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1826103, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1724153, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1616278, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1504138, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1389084, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1270952, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1150312, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.1027405, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.0901933, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.0774207, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.064406, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.0512874, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.0380073, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.024691, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 2.0114474, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9984541, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.985894, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9740292, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9629867, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9531173, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9445643, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.937741, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9325072, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9291059, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9274273, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9271798, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9279945, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9295657, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9311968, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9326863, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9336437, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9339688, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9335345, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9325393, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9310582, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9292464, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9273058, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9253434, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.923504, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9219116, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9205425, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9194502, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9186736, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9181443, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9177982, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9175037, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9173856, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9171816, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9170293, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9167331, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.916403, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9159353, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9153596, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9147485, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9140575, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9132907, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9124637, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9116048, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9107949, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9099742, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9091239, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9083283, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9074904, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.90662, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9056321, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9047123, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9035983, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9024205, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.9010582, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8995429, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8980219, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8963708, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8945379, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8927025, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.890711, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8885728, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.886146, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8835456, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8806206, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8771524, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8732826, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8690914, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8640808, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8586669, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.852867, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8456707, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.838191, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8292315, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8191332, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8079358, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.7947605, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.778465, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.7618061, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.7429278, G_loss_U: 1.1555902, G_loss_S: 9.754866e-06, E_loss_t0: 2.3701618\n",
            "step: 99/100, D_loss: 1.8755782, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.9932246, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 2.0344577, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.9966303, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.9021112, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7893481, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7067572, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6831319, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7194313, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7752116, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.8078583, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.8098848, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7814198, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7354101, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6926813, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6688325, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6710038, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6887685, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7043993, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.7111017, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.702841, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6802349, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.656806, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6369172, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.627393, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6292927, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.639482, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6367218, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6264875, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.6065464, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5924586, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5876682, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.592405, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5943205, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5902878, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5822377, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5721056, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5632052, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5637128, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5642674, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5598075, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5529032, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5453838, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5441339, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5418537, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.537783, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5356561, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5269676, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5230379, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5211936, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5191631, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5173419, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5108311, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5056467, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5049689, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5026037, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4963329, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4943767, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4905672, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4894711, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.486196, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4827981, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4783431, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4778571, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4766992, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.470391, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4688576, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4659536, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4607253, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4625522, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4556668, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4546167, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4527708, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4470607, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4454806, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4450066, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4404867, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4377105, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4324559, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4370217, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4324776, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4305634, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4251868, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4168051, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4131812, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4098333, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4148465, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4130714, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4054638, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.395019, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.390485, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.388972, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4051903, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.4334027, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5867105, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.66797, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.5774999, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.392333, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 2.0602381, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 1.440872, G_loss_U: 1.4427422, G_loss_S: 9.671402e-06, E_loss_t0: 2.3449872\n",
            "step: 99/100, D_loss: 2.0377424, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 2.1952603, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 2.4353619, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 2.496382, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 2.2605147, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 2.0677204, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.9389734, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.8210342, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.6981066, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5688398, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4443125, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4691355, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.7677922, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5869966, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4204541, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4472228, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4856452, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5120302, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.528232, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5366483, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5371369, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5361813, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5277951, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5157522, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5025755, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4915302, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4774964, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4669394, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4569787, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4530737, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4504846, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4519361, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4576136, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.460844, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.468962, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4676723, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4646686, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.458605, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4548084, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4536029, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.448236, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4445038, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4469435, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4463906, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4496368, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4471395, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4486091, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4427671, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4467617, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.442859, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4443477, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4383621, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4382106, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4406719, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.434089, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4329604, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4348351, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4369559, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4336336, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4323789, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4350082, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4314286, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4289501, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4250546, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4268619, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4245014, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4269981, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4223577, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4212978, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4250647, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4212236, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4211454, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4181654, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4147773, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4147568, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4171085, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4148349, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4128362, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4196819, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4166783, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4112035, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4105139, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4106381, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4068867, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.407108, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4131836, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4090945, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4044569, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.4049758, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3960543, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3985941, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.398902, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3988847, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3987558, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3961449, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3890637, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3920218, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3894241, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3950368, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.3894181, G_loss_U: 0.12568097, G_loss_S: 1.0278163e-05, E_loss_t0: 2.4223657\n",
            "step: 99/100, D_loss: 1.5400645, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.6227462, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.6787449, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.7199426, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.7486768, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.7658002, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.7721386, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.759462, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.7304267, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.6933619, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.6573337, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.620298, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.5834384, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.5512633, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.5219586, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4903858, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.464854, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4450375, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4281876, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4110802, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4086661, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4199517, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4290755, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4426974, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4482177, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4563973, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4297389, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4207984, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4289755, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.409397, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4077438, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4062198, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4120251, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4161735, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4097369, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.408618, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4115129, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4125515, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4146867, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4136997, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4088452, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4075559, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4100484, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4021343, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4055882, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4013413, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3992367, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4006189, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.4001858, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3989693, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3996812, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3939244, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3934491, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3938823, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3940961, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3945435, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3895268, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3944536, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3921858, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.389889, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3832903, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3739455, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3733197, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3765687, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3772796, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3683705, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3763193, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.376143, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3712299, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3806131, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3692015, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3642956, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3624773, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3558311, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3477554, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3571144, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3513638, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.353301, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3464234, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3367852, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3300798, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3309201, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3316442, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3235416, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3149968, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3110712, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.3157408, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2958733, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2940868, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2932436, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.296609, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2712537, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2783844, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2760438, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2578675, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2560464, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.211962, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2314417, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.2056953, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.172341, G_loss_U: 1.0729802, G_loss_S: 9.633419e-06, E_loss_t0: 2.3612993\n",
            "step: 99/100, D_loss: 1.5727068, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.6599661, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.9296091, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 2.171723, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.7572185, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.6799794, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.6400352, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.5998418, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.554933, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.5029538, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4509935, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.382869, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3077703, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.9790286, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3539999, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4105654, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4525801, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4668417, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4781435, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4826283, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4856082, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4864407, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4841174, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4810174, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4762768, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4722774, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.46522, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4596344, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4508212, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4408531, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.432272, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4236383, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4135258, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4071512, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4008377, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3946022, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3894958, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.384197, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.379552, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.379769, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3770695, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.373472, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3733886, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.364097, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.362974, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3643031, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3679019, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3538656, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3551074, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3496201, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3520801, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3487017, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3282872, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3193763, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3283951, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2988591, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3092175, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3325913, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2873906, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.3012555, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2692592, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2544639, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.245883, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2601362, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2152618, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2142063, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.2047365, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1822723, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1946858, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.234831, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1842834, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1940293, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1568594, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1779974, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1583624, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.124466, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.1076397, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.0375024, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.0789722, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.0630372, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.0200747, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.96609354, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.9855926, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.007704, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.97301507, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.9469866, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.96941185, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.91313493, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.89898115, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.8619712, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.7876639, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.9180244, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.9172337, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.841431, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.8607068, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.038244, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.6686623, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.9652384, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.67732847, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 0.83423334, G_loss_U: 0.9069416, G_loss_S: 9.988318e-06, E_loss_t0: 2.3972416\n",
            "step: 99/100, D_loss: 1.4874952, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.80381024, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 1.1593877, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.76763517, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.80864346, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7546734, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7241786, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.73201555, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.73602784, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7671374, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7583723, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.77571744, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7303118, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.62811625, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7630729, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.75962764, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.6533345, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.80183816, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7262732, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.72624224, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.91609424, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.58228225, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 1.1696346, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.64538705, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.97047126, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.79736567, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5854921, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 1.3149651, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.62435037, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.65147406, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.86541444, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7492035, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5355918, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.94744116, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.67720366, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.9669739, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.86049837, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.70082337, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5538501, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 1.0060731, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5576734, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7634901, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.78489935, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.873583, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5899375, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.61843425, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.8419411, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.6167775, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5764889, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.6943611, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.64481986, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.61650866, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5589328, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7204631, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.53944147, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.46621874, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5389353, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.7241158, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.6923185, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5297477, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.49820274, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5455635, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.4945366, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.54191864, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.5177676, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.550319, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.4744691, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.50255805, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.50813574, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.49783632, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.54813737, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.49963513, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.45278195, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.4906598, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.4666557, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.52060574, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.40274763, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.41225508, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.588456, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.40540674, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.48142636, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.48591217, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.32015595, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.44865647, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.440419, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.4321081, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.42149568, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.56141776, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.449197, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.43764317, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.41594923, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.44799492, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.44747162, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.41473094, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.40743116, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.4977652, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.33279908, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.4289282, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.42964372, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 0.35451, G_loss_U: 4.8402057, G_loss_S: 1.006671e-05, E_loss_t0: 2.3361342\n",
            "step: 99/100, D_loss: 3.2231178, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.2934954, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 5.774068, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 6.0644226, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 6.0243034, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 5.8911376, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 5.7154574, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 5.5188513, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 5.3124466, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 5.1027994, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 4.8941317, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 4.6893873, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 4.490796, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 4.2998085, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 4.116965, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.9432397, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.7782211, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.622007, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.4749675, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.3359656, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.2051396, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 3.0822492, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.9664707, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.8577154, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.7552319, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.6586986, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.567863, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.4816384, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.3997586, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.3216338, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.2467003, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.1745057, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.102515, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 2.0325332, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.9589872, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.8849293, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.8188075, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7699844, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7453067, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7434208, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7554811, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7800606, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7950366, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.8053705, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7990297, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.788938, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7653847, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7447821, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7221117, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.7024158, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6861982, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6706784, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6615155, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.656837, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6486375, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6469852, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6379634, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6271454, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6180892, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.6107411, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5996389, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5901442, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5752419, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5634869, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5517198, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5360643, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5334538, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5183082, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.5092851, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.506088, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4967827, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4844042, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4819103, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4688755, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.459391, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4603385, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4504337, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4395537, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4378132, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4248786, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4305781, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4182761, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4054618, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4020793, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.4033616, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3963009, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3972609, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3846815, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.382513, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.371074, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3576199, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3793346, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3576196, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3607779, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3572968, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3128815, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3127072, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3052217, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3040842, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.2959293, G_loss_U: -1.5623862, G_loss_S: 9.619397e-06, E_loss_t0: 2.3909705\n",
            "step: 99/100, D_loss: 1.3365085, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3654569, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3889391, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4026117, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4213188, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4333788, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4421734, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4543281, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4600039, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4637609, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4663275, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4667177, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4689881, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4663581, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.464, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4613386, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4550593, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4513129, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4447441, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4400151, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.434046, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4284488, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4230189, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4168396, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4102983, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.4052161, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3966788, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3901782, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3862605, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3778948, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3729513, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3615946, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3616593, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3497692, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3384569, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3279531, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3205696, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3211303, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.3028806, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.277662, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.2985728, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.2390705, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.2530322, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1839241, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1257933, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1390773, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.212428, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.2312216, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.2265506, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1326923, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1152376, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1241156, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1580248, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.1412458, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.044298, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.0895975, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.0616285, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.0409466, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.041977, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.9920958, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.98995006, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.0377873, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.93699306, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.9571538, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.9532563, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.95814824, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.8771517, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.979176, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.9371897, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.8738677, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.89776134, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.870669, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.81156087, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.8767068, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.84422046, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.77304506, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.79220855, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.8014817, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.7716992, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.78596437, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.7866111, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.80567706, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.8062441, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.7421306, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.7302535, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.75605947, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.73056155, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.68912804, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.6490084, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.75673777, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.64525, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.79110736, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.8004673, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.70337003, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.6775843, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.68451077, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.6791612, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.59817314, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.5984891, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 0.7580143, G_loss_U: 1.8417751, G_loss_S: 1.0763163e-05, E_loss_t0: 2.3994696\n",
            "step: 99/100, D_loss: 1.63684, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.6840097, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.7326514, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.7738173, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.7283916, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.8825383, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.9310021, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.9442677, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.9399247, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.9246254, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.901997, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.8726044, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.8396356, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.804071, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.7649808, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.7241387, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.6814392, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.637519, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.59268, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.54513, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.4966664, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.4445028, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.387524, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.3223143, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.2441158, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 2.1295576, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.844828, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.4995115, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.4659845, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.4348637, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.396147, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.3676836, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.3382046, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.2920069, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.2488735, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.1194382, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.97014415, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.2163146, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.3484433, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.261343, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.1967986, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.9986057, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.0142416, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.0849735, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.1372668, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.1686733, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.1042669, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.0091944, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.0109096, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.0737412, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.071986, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.0219553, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.93722403, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.9145193, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.9958793, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 1.0031277, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.87569904, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.9398403, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.94768643, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.94011885, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.94483125, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.94581705, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.84130406, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n",
            "step: 99/100, D_loss: 0.90311587, G_loss_U: 0.53290766, G_loss_S: 1.0802405e-05, E_loss_t0: 2.373126\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "error",
          "ename": "KeyboardInterrupt",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-69-6182154faf78>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mTimeGAN\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparameters\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
            "\u001b[0;32m<ipython-input-67-959e77b0d4cf>\u001b[0m in \u001b[0;36mTimeGAN\u001b[0;34m(data, parameters)\u001b[0m\n\u001b[1;32m    167\u001b[0m     \u001b[0;32mfor\u001b[0m \u001b[0mbatch_index\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloader\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    168\u001b[0m       random_data = random_generator(batch_size=batch_size, z_dim=dim, \n\u001b[0;32m--> 169\u001b[0;31m                                        T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n\u001b[0m\u001b[1;32m    170\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    171\u001b[0m       \u001b[0mz\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrandom_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m<ipython-input-19-81307a2f1257>\u001b[0m in \u001b[0;36mextract_time\u001b[0;34m(data)\u001b[0m\n\u001b[1;32m     48\u001b[0m   \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     49\u001b[0m     \u001b[0mmax_seq_len\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmax_seq_len\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m     \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     52\u001b[0m   \u001b[0;32mreturn\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_seq_len\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uP_YyhAZpKXl",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "942a1644-b110-4e31-d7f2-debf02a2e3f3"
      },
      "source": [
        "  hidden_dim = parameters[\"hidden_dim\"]\n",
        "  num_layers = parameters[\"num_layers\"]\n",
        "  iterations = parameters[\"iterations\"]\n",
        "  batch_size = parameters[\"batch_size\"]\n",
        "  module = parameters[\"module\"]\n",
        "  epoch = parameters[\"epoch\"]\n",
        "  no, seq_len, dim = np.asarray(data).shape\n",
        "  z_dim = dim\n",
        "  gamma = 1\n",
        "\n",
        "  # FIX: all five modules previously used the undefined name `n_layers`;\n",
        "  # the parameter is read into `num_layers` above, so `n_layers` raises\n",
        "  # NameError on a fresh kernel run.\n",
        "  Embedder = Time_GAN_module(input_size=z_dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=num_layers)\n",
        "  Recovery = Time_GAN_module(input_size=hidden_dim, output_size=dim, hidden_dim=hidden_dim, n_layers=num_layers)\n",
        "  Generator = Time_GAN_module(input_size=dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=num_layers)\n",
        "  Supervisor = Time_GAN_module(input_size=hidden_dim, output_size=hidden_dim, hidden_dim=hidden_dim, n_layers=num_layers-1)\n",
        "  Discriminator = Time_GAN_module(input_size=hidden_dim, output_size=1, hidden_dim=hidden_dim, n_layers=num_layers, activation=nn.Identity)\n",
        "\n",
        "  embedder_optimizer = optim.Adam(Embedder.parameters(), lr=0.001)\n",
        "  recovery_optimizer = optim.Adam(Recovery.parameters(), lr=0.001)\n",
        "  # FIX: this optimizer must own the Supervisor's parameters; it previously\n",
        "  # held Recovery.parameters(), so the Supervisor network was never updated\n",
        "  # and Recovery was stepped twice per batch.\n",
        "  supervisor_optimizer = optim.Adam(Supervisor.parameters(), lr=0.001)\n",
        "  discriminator_optimizer = optim.Adam(Discriminator.parameters(), lr=0.001)\n",
        "  generator_optimizer = optim.Adam(Generator.parameters(), lr=0.001)\n",
        "\n",
        "  # Reconstruction/supervision criterion, shared by both phases below\n",
        "  # (previously re-instantiated on every batch of the first loop, and the\n",
        "  # second loop relied on the instance leaking out of the first).\n",
        "  MSE_loss = nn.MSELoss()\n",
        "\n",
        "  # Phase 1: train the autoencoder (Embedder + Recovery) on reconstruction\n",
        "  print('Start Embedding Network Training')\n",
        "  for e in range(epoch): \n",
        "    for batch_index, X in enumerate(loader):\n",
        "        \n",
        "        H, _ = Embedder(X.float())\n",
        "        H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        X_tilde, _ = Recovery(H)\n",
        "        X_tilde = torch.reshape(X_tilde, (batch_size, seq_len, dim))\n",
        "\n",
        "        E_loss0 = 10 * torch.sqrt(MSE_loss(X, X_tilde))  \n",
        "\n",
        "        Embedder.zero_grad()\n",
        "        Recovery.zero_grad()\n",
        "\n",
        "        E_loss0.backward(retain_graph=True)\n",
        "\n",
        "        embedder_optimizer.step()\n",
        "        recovery_optimizer.step()\n",
        "\n",
        "        # log once per epoch (first batch only); `e > 0` replaces the\n",
        "        # equivalent but linear-time `e in range(1, epoch)` test\n",
        "        if e > 0 and batch_index == 0:\n",
        "            print('step: '+ str(e) + '/' + str(epoch) + ', e_loss: ' + str(np.sqrt(E_loss0.detach().numpy())))\n",
        "\n",
        "  print('Finish Embedding Network Training')\n",
        "\n",
        "  # Phase 2: pretrain the Supervisor with the supervised (next-step) loss only\n",
        "  print('Start Training with Supervised Loss Only')\n",
        "  for e in range(epoch): \n",
        "    for batch_index, X in enumerate(loader):\n",
        "\n",
        "        H, _ = Embedder(X.float())\n",
        "        H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        H_hat_supervise, _ = Supervisor(H)\n",
        "        H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))  \n",
        "\n",
        "        # one-step-ahead prediction loss in the latent space\n",
        "        G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "\n",
        "        Embedder.zero_grad()\n",
        "        Supervisor.zero_grad()\n",
        "\n",
        "        G_loss_S.backward(retain_graph=True)\n",
        "\n",
        "        embedder_optimizer.step()\n",
        "        supervisor_optimizer.step()\n",
        "\n",
        "        if e > 0 and batch_index == 0:\n",
        "            print('step: '+ str(e) + '/' + str(epoch) + ', s_loss: ' + str(np.sqrt(G_loss_S.detach().numpy())))\n",
        "\n",
        "  print('Finish Training with Supervised Loss Only')"
      ],
      "id": "uP_YyhAZpKXl",
      "execution_count": 43,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Start Embedding Network Training\n",
            "step: 1/100, e_loss: 1.6018305\n",
            "step: 2/100, e_loss: 1.55982\n",
            "step: 3/100, e_loss: 1.4912727\n",
            "step: 4/100, e_loss: 1.4599437\n",
            "step: 5/100, e_loss: 1.3488882\n",
            "step: 6/100, e_loss: 1.2394172\n",
            "step: 7/100, e_loss: 1.2484871\n",
            "step: 8/100, e_loss: 1.2529289\n",
            "step: 9/100, e_loss: 1.2284856\n",
            "step: 10/100, e_loss: 1.1609265\n",
            "step: 11/100, e_loss: 1.0964484\n",
            "step: 12/100, e_loss: 0.8498254\n",
            "step: 13/100, e_loss: 0.68953836\n",
            "step: 14/100, e_loss: 0.6233022\n",
            "step: 15/100, e_loss: 0.5515359\n",
            "step: 16/100, e_loss: 0.54300267\n",
            "step: 17/100, e_loss: 0.51206124\n",
            "step: 18/100, e_loss: 0.48474026\n",
            "step: 19/100, e_loss: 0.4790923\n",
            "step: 20/100, e_loss: 0.47527474\n",
            "step: 21/100, e_loss: 0.45888883\n",
            "step: 22/100, e_loss: 0.4562261\n",
            "step: 23/100, e_loss: 0.44347155\n",
            "step: 24/100, e_loss: 0.42158148\n",
            "step: 25/100, e_loss: 0.42365658\n",
            "step: 26/100, e_loss: 0.41077211\n",
            "step: 27/100, e_loss: 0.41154054\n",
            "step: 28/100, e_loss: 0.4007569\n",
            "step: 29/100, e_loss: 0.4030361\n",
            "step: 30/100, e_loss: 0.394878\n",
            "step: 31/100, e_loss: 0.3867121\n",
            "step: 32/100, e_loss: 0.38928956\n",
            "step: 33/100, e_loss: 0.38518637\n",
            "step: 34/100, e_loss: 0.38142926\n",
            "step: 35/100, e_loss: 0.363749\n",
            "step: 36/100, e_loss: 0.3744426\n",
            "step: 37/100, e_loss: 0.36674908\n",
            "step: 38/100, e_loss: 0.34521663\n",
            "step: 39/100, e_loss: 0.36009035\n",
            "step: 40/100, e_loss: 0.3608703\n",
            "step: 41/100, e_loss: 0.34686533\n",
            "step: 42/100, e_loss: 0.3649041\n",
            "step: 43/100, e_loss: 0.34724686\n",
            "step: 44/100, e_loss: 0.3523276\n",
            "step: 45/100, e_loss: 0.3577673\n",
            "step: 46/100, e_loss: 0.3497116\n",
            "step: 47/100, e_loss: 0.3434085\n",
            "step: 48/100, e_loss: 0.3390639\n",
            "step: 49/100, e_loss: 0.35530803\n",
            "step: 50/100, e_loss: 0.33366355\n",
            "step: 51/100, e_loss: 0.34375858\n",
            "step: 52/100, e_loss: 0.34426045\n",
            "step: 53/100, e_loss: 0.33536705\n",
            "step: 54/100, e_loss: 0.33753568\n",
            "step: 55/100, e_loss: 0.33029315\n",
            "step: 56/100, e_loss: 0.3326296\n",
            "step: 57/100, e_loss: 0.32788277\n",
            "step: 58/100, e_loss: 0.33458218\n",
            "step: 59/100, e_loss: 0.3290568\n",
            "step: 60/100, e_loss: 0.33641434\n",
            "step: 61/100, e_loss: 0.3191416\n",
            "step: 62/100, e_loss: 0.32640293\n",
            "step: 63/100, e_loss: 0.31105605\n",
            "step: 64/100, e_loss: 0.32135686\n",
            "step: 65/100, e_loss: 0.32300413\n",
            "step: 66/100, e_loss: 0.32456896\n",
            "step: 67/100, e_loss: 0.3167589\n",
            "step: 68/100, e_loss: 0.32117045\n",
            "step: 69/100, e_loss: 0.31779772\n",
            "step: 70/100, e_loss: 0.31788513\n",
            "step: 71/100, e_loss: 0.31247574\n",
            "step: 72/100, e_loss: 0.3222729\n",
            "step: 73/100, e_loss: 0.32216486\n",
            "step: 74/100, e_loss: 0.3162142\n",
            "step: 75/100, e_loss: 0.323382\n",
            "step: 76/100, e_loss: 0.31210086\n",
            "step: 77/100, e_loss: 0.31600398\n",
            "step: 78/100, e_loss: 0.30378214\n",
            "step: 79/100, e_loss: 0.30717608\n",
            "step: 80/100, e_loss: 0.30637977\n",
            "step: 81/100, e_loss: 0.3019036\n",
            "step: 82/100, e_loss: 0.30292514\n",
            "step: 83/100, e_loss: 0.30799484\n",
            "step: 84/100, e_loss: 0.31871977\n",
            "step: 85/100, e_loss: 0.3078426\n",
            "step: 86/100, e_loss: 0.30697533\n",
            "step: 87/100, e_loss: 0.2954566\n",
            "step: 88/100, e_loss: 0.2938855\n",
            "step: 89/100, e_loss: 0.32049143\n",
            "step: 90/100, e_loss: 0.29602164\n",
            "step: 91/100, e_loss: 0.28824544\n",
            "step: 92/100, e_loss: 0.29850745\n",
            "step: 93/100, e_loss: 0.29717103\n",
            "step: 94/100, e_loss: 0.3118845\n",
            "step: 95/100, e_loss: 0.3135977\n",
            "step: 96/100, e_loss: 0.30650514\n",
            "step: 97/100, e_loss: 0.288569\n",
            "step: 98/100, e_loss: 0.28989348\n",
            "step: 99/100, e_loss: 0.29549134\n",
            "Finish Embedding Network Training\n",
            "Start Training with Supervised Loss Only\n",
            "step: 1/100, s_loss: 0.10818296\n",
            "step: 2/100, s_loss: 0.10493532\n",
            "step: 3/100, s_loss: 0.097637504\n",
            "step: 4/100, s_loss: 0.09351898\n",
            "step: 5/100, s_loss: 0.08887326\n",
            "step: 6/100, s_loss: 0.084249556\n",
            "step: 7/100, s_loss: 0.08233826\n",
            "step: 8/100, s_loss: 0.07572724\n",
            "step: 9/100, s_loss: 0.07295896\n",
            "step: 10/100, s_loss: 0.0684145\n",
            "step: 11/100, s_loss: 0.06639816\n",
            "step: 12/100, s_loss: 0.061089225\n",
            "step: 13/100, s_loss: 0.05982351\n",
            "step: 14/100, s_loss: 0.05463554\n",
            "step: 15/100, s_loss: 0.05182107\n",
            "step: 16/100, s_loss: 0.049826745\n",
            "step: 17/100, s_loss: 0.048567005\n",
            "step: 18/100, s_loss: 0.044549093\n",
            "step: 19/100, s_loss: 0.04294283\n",
            "step: 20/100, s_loss: 0.040436093\n",
            "step: 21/100, s_loss: 0.037693523\n",
            "step: 22/100, s_loss: 0.037398078\n",
            "step: 23/100, s_loss: 0.03584957\n",
            "step: 24/100, s_loss: 0.032504782\n",
            "step: 25/100, s_loss: 0.03247154\n",
            "step: 26/100, s_loss: 0.02992317\n",
            "step: 27/100, s_loss: 0.02881294\n",
            "step: 28/100, s_loss: 0.028586319\n",
            "step: 29/100, s_loss: 0.026487172\n",
            "step: 30/100, s_loss: 0.02627775\n",
            "step: 31/100, s_loss: 0.025274536\n",
            "step: 32/100, s_loss: 0.024114208\n",
            "step: 33/100, s_loss: 0.02360426\n",
            "step: 34/100, s_loss: 0.023108775\n",
            "step: 35/100, s_loss: 0.021612221\n",
            "step: 36/100, s_loss: 0.020247683\n",
            "step: 37/100, s_loss: 0.020620504\n",
            "step: 38/100, s_loss: 0.019110298\n",
            "step: 39/100, s_loss: 0.019379243\n",
            "step: 40/100, s_loss: 0.01847204\n",
            "step: 41/100, s_loss: 0.01761234\n",
            "step: 42/100, s_loss: 0.016968433\n",
            "step: 43/100, s_loss: 0.016801627\n",
            "step: 44/100, s_loss: 0.015742982\n",
            "step: 45/100, s_loss: 0.015525972\n",
            "step: 46/100, s_loss: 0.015359331\n",
            "step: 47/100, s_loss: 0.014493372\n",
            "step: 48/100, s_loss: 0.014688877\n",
            "step: 49/100, s_loss: 0.013528825\n",
            "step: 50/100, s_loss: 0.0132684475\n",
            "step: 51/100, s_loss: 0.013099709\n",
            "step: 52/100, s_loss: 0.012771256\n",
            "step: 53/100, s_loss: 0.012097522\n",
            "step: 54/100, s_loss: 0.011935487\n",
            "step: 55/100, s_loss: 0.011637087\n",
            "step: 56/100, s_loss: 0.011675934\n",
            "step: 57/100, s_loss: 0.010994293\n",
            "step: 58/100, s_loss: 0.010775758\n",
            "step: 59/100, s_loss: 0.010755676\n",
            "step: 60/100, s_loss: 0.010340911\n",
            "step: 61/100, s_loss: 0.010073082\n",
            "step: 62/100, s_loss: 0.009580432\n",
            "step: 63/100, s_loss: 0.009446489\n",
            "step: 64/100, s_loss: 0.009002541\n",
            "step: 65/100, s_loss: 0.00857441\n",
            "step: 66/100, s_loss: 0.008657759\n",
            "step: 67/100, s_loss: 0.008466622\n",
            "step: 68/100, s_loss: 0.008071647\n",
            "step: 69/100, s_loss: 0.007979972\n",
            "step: 70/100, s_loss: 0.007569215\n",
            "step: 71/100, s_loss: 0.0075846226\n",
            "step: 72/100, s_loss: 0.0070887255\n",
            "step: 73/100, s_loss: 0.006816514\n",
            "step: 74/100, s_loss: 0.006503068\n",
            "step: 75/100, s_loss: 0.0065890597\n",
            "step: 76/100, s_loss: 0.006341106\n",
            "step: 77/100, s_loss: 0.0063390443\n",
            "step: 78/100, s_loss: 0.005861384\n",
            "step: 79/100, s_loss: 0.0056449156\n",
            "step: 80/100, s_loss: 0.00548618\n",
            "step: 81/100, s_loss: 0.0055482606\n",
            "step: 82/100, s_loss: 0.0052849045\n",
            "step: 83/100, s_loss: 0.0050362716\n",
            "step: 84/100, s_loss: 0.0049876003\n",
            "step: 85/100, s_loss: 0.0047601247\n",
            "step: 86/100, s_loss: 0.004721197\n",
            "step: 87/100, s_loss: 0.0045680245\n",
            "step: 88/100, s_loss: 0.0042868736\n",
            "step: 89/100, s_loss: 0.0042642104\n",
            "step: 90/100, s_loss: 0.0041556996\n",
            "step: 91/100, s_loss: 0.0039212457\n",
            "step: 92/100, s_loss: 0.0038937374\n",
            "step: 93/100, s_loss: 0.0037632682\n",
            "step: 94/100, s_loss: 0.0036107306\n",
            "step: 95/100, s_loss: 0.00352756\n",
            "step: 96/100, s_loss: 0.0033645118\n",
            "step: 97/100, s_loss: 0.0033984582\n",
            "step: 98/100, s_loss: 0.0033026256\n",
            "step: 99/100, s_loss: 0.0031760319\n",
            "Finish Training with Supervised Loss Only\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 783
        },
        "id": "GtJjmjYDFR28",
        "outputId": "1787e47c-3478-4f7c-80bc-22f2b4f5822f"
      },
      "source": [
        "  print('Start Joint Training')\n",
        "  for itt in range(epoch):\n",
        "    for batch_index, X in enumerate(loader):\n",
        "      random_data = random_generator(batch_size=batch_size, z_dim=dim, \n",
        "                                       T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n",
        "        \n",
        "      # --- Generator training ---\n",
        "      z = torch.tensor(random_data)\n",
        "      z = z.float()\n",
        "        \n",
        "      e_hat, _ = Generator(z)\n",
        "      e_hat = torch.reshape(e_hat, (batch_size, seq_len, hidden_dim))\n",
        "        \n",
        "      H_hat, _ = Supervisor(e_hat)\n",
        "      H_hat = torch.reshape(H_hat, (batch_size, seq_len, hidden_dim))\n",
        "        \n",
        "      Y_fake = Discriminator(H_hat)\n",
        "      Y_fake = torch.reshape(Y_fake, (batch_size, seq_len, 1))\n",
        "        \n",
        "      x_hat, _ = Recovery(H_hat)\n",
        "      x_hat = torch.reshape(x_hat, (batch_size, seq_len, dim))\n",
        "        \n",
        "      H, _ = Embedder(X.float())\n",
        "      H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      H_hat_supervise, _ = Supervisor(H)\n",
        "      H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Generator.zero_grad()\n",
        "      Supervisor.zero_grad()\n",
        "      Discriminator.zero_grad()\n",
        "      Recovery.zero_grad()\n",
        "\n",
        "      # line 267 of original implementation: G_loss_U, G_loss_S, G_loss_V\n",
        "      G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "        \n",
        "      # FIX: BCE-with-logits takes (logits, targets); the arguments were\n",
        "      # swapped here, which is what produced the negative G_loss_U values\n",
        "      # in the training log.\n",
        "      G_loss_U = binary_cross_entropy_loss(Y_fake, torch.ones_like(Y_fake))\n",
        "        \n",
        "      # moment-matching losses on std and mean of the generated batch\n",
        "      G_loss_V1 = torch.mean(torch.abs((torch.std(x_hat, [0], unbiased = False)) + 1e-6 - (torch.std(X, [0]) + 1e-6)))\n",
        "      G_loss_V2 = torch.mean(torch.abs((torch.mean(x_hat, [0]) - (torch.mean(X, [0])))))\n",
        "      G_loss_V = G_loss_V1 + G_loss_V2\n",
        "        \n",
        "      # a backward step per loss accumulates gradients, so the three terms\n",
        "      # are optimized jointly by the optimizer steps below\n",
        "      G_loss_S.backward(retain_graph=True)\n",
        "      G_loss_U.backward(retain_graph=True)\n",
        "      G_loss_V.backward(retain_graph=True)\n",
        "\n",
        "      generator_optimizer.step()\n",
        "      supervisor_optimizer.step()\n",
        "      discriminator_optimizer.step()\n",
        "\n",
        "      # --- Embedder training ---\n",
        "      # line 270 of original implementation: E_solver optimizes\n",
        "      # E_loss = E_loss0 + 0.1 * G_loss_S; here the two terms are\n",
        "      # back-propagated separately so their gradients accumulate.\n",
        "      MSE_loss = nn.MSELoss()\n",
        "        \n",
        "      H, _ = Embedder(X.float())\n",
        "      H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      X_tilde, _ = Recovery(H)\n",
        "      X_tilde = torch.reshape(X_tilde, (batch_size, seq_len, dim))\n",
        "\n",
        "      E_loss_T0 = MSE_loss(X, X_tilde)\n",
        "      E_loss0 = 10 * torch.sqrt(MSE_loss(X, X_tilde))  \n",
        "        \n",
        "      H_hat_supervise, _ = Supervisor(H)\n",
        "      H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))  \n",
        "\n",
        "      G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "      E_loss = E_loss0  + 0.1 * G_loss_S\n",
        "        \n",
        "      # FIX: zero the gradients BEFORE backward(); previously zero_grad()\n",
        "      # was called between backward() and step(), wiping the fresh\n",
        "      # gradients so the three optimizer steps below updated nothing.\n",
        "      Embedder.zero_grad()\n",
        "      Recovery.zero_grad()\n",
        "      Supervisor.zero_grad()\n",
        "        \n",
        "      G_loss_S.backward(retain_graph=True)\n",
        "      E_loss_T0.backward()\n",
        "        \n",
        "      embedder_optimizer.step()\n",
        "      recovery_optimizer.step()\n",
        "      supervisor_optimizer.step()\n",
        "    # --- Discriminator training (once per epoch, after the G/E pass) ---\n",
        "    for batch_index, X in enumerate(loader):\n",
        "      random_data = random_generator(batch_size=batch_size, z_dim=dim, \n",
        "                                       T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n",
        "      \n",
        "      z = torch.tensor(random_data)\n",
        "      z = z.float()\n",
        "\n",
        "      # FIX: cast to float like every other Embedder(X.float()) call in\n",
        "      # this notebook, so the loop does not depend on the loader's dtype\n",
        "      H, _ = Embedder(X.float())\n",
        "      H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Y_real = Discriminator(H)\n",
        "      Y_real = torch.reshape(Y_real, (batch_size, seq_len, 1))\n",
        "      \n",
        "      e_hat, _ = Generator(z)\n",
        "      e_hat = torch.reshape(e_hat, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Y_fake_e = Discriminator(e_hat)\n",
        "      Y_fake_e = torch.reshape(Y_fake_e, (batch_size, seq_len, 1))\n",
        "        \n",
        "      H_hat, _ = Supervisor(e_hat)\n",
        "      H_hat = torch.reshape(H_hat, (batch_size, seq_len, hidden_dim))\n",
        "        \n",
        "      Y_fake = Discriminator(H_hat)\n",
        "      Y_fake = torch.reshape(Y_fake, (batch_size, seq_len, 1))\n",
        "        \n",
        "      x_hat, _ = Recovery(H_hat)\n",
        "      x_hat = torch.reshape(x_hat, (batch_size, seq_len, dim))\n",
        "\n",
        "      Generator.zero_grad()\n",
        "      Supervisor.zero_grad()\n",
        "      Discriminator.zero_grad()\n",
        "      Recovery.zero_grad()\n",
        "      Embedder.zero_grad()\n",
        "\n",
        "      # nn.BCEWithLogitsLoss expects (logits, targets):\n",
        "      # https://discuss.pytorch.org/t/v1-0-1-nn-bcewithlogitsloss-returns-negative-loss-sigmoid-layer-not-deployed/57409/3\n",
        "      D_loss_real = nn.BCEWithLogitsLoss()\n",
        "      DLR = D_loss_real(Y_real, torch.ones_like(Y_real))\n",
        "      # FIX: the two fake-branch losses had the arguments swapped\n",
        "      # (targets, logits), which drove D_loss negative in the log above;\n",
        "      # the order is (logits, targets), matching DLR.\n",
        "      D_loss_fake = nn.BCEWithLogitsLoss()\n",
        "      DLF = D_loss_fake(Y_fake, torch.zeros_like(Y_fake))\n",
        "      D_loss_fake_e = nn.BCEWithLogitsLoss()\n",
        "      DLF_e = D_loss_fake_e(Y_fake_e, torch.zeros_like(Y_fake_e))\n",
        "      D_loss = DLR + DLF + gamma * DLF_e\n",
        "\n",
        "      D_loss.backward(retain_graph=True)\n",
        "      \n",
        "      discriminator_optimizer.step()\n",
        "\n",
        "      # FIX: report the joint-training epoch counter `itt`; the code\n",
        "      # previously printed the stale `e` left over from the pretraining\n",
        "      # cell, which is why every log line read 'step: 99/100'.\n",
        "      print('step: '+ str(itt) + '/' + str(epoch) + \n",
        "            ', D_loss: ' + str(D_loss.detach().numpy()) +\n",
        "            ', G_loss_U: ' + str(G_loss_U.detach().numpy()) + \n",
        "            ', G_loss_S: ' + str(G_loss_S.detach().numpy()) + \n",
        "            ', E_loss_t0: ' + str(np.sqrt(E_loss0.detach().numpy()))\n",
        "             )\n",
        "  print('Finish Joint Training')"
      ],
      "id": "GtJjmjYDFR28",
      "execution_count": 45,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Start Joint Training\n",
            "step: 99/100, D_loss: -2.7969604, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -2.8291883, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -2.8611054, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -2.8930597, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -2.9248323, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -2.956499, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -2.9881005, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.0196328, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.0511103, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.0824876, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.1137981, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.145081, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.176251, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.2072897, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.2383351, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.2693243, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.300127, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.3309293, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.3617063, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.3925042, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n",
            "step: 99/100, D_loss: -3.4229817, G_loss_U: -4.151405, G_loss_S: 9.272844e-06, E_loss_t0: 2.4685705\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "error",
          "ename": "KeyboardInterrupt",
          "evalue": "ignored",
          "traceback": [
            "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
            "\u001b[0;32m<ipython-input-45-9a32a2d7d6d6>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     89\u001b[0m   \u001b[0;32mfor\u001b[0m \u001b[0mbatch_index\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloader\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     90\u001b[0m     random_data = random_generator(batch_size=batch_size, z_dim=dim, \n\u001b[0;32m---> 91\u001b[0;31m                                      T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n\u001b[0m\u001b[1;32m     92\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     93\u001b[0m     \u001b[0mz\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrandom_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;32m<ipython-input-19-81307a2f1257>\u001b[0m in \u001b[0;36mextract_time\u001b[0;34m(data)\u001b[0m\n\u001b[1;32m     48\u001b[0m   \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     49\u001b[0m     \u001b[0mmax_seq_len\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmax_seq_len\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m     \u001b[0mtime\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     52\u001b[0m   \u001b[0;32mreturn\u001b[0m \u001b[0mtime\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_seq_len\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
            "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "2ckDW187GK3i",
        "outputId": "c3774480-c24a-4651-b957-c1da5194487d"
      },
      "source": [
        "      X = next(iter(loader))\n",
        "      random_data = random_generator(batch_size=batch_size, z_dim=dim, \n",
        "                                       T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n",
        "        \n",
        "      # Generator Training \n",
        "      ## Train Generator\n",
        "      z = torch.tensor(random_data)\n",
        "      z = z.float()\n",
        "        \n",
        "      e_hat, _ = Generator(z)\n",
        "      e_hat = torch.reshape(e_hat, (batch_size, seq_len, hidden_dim))\n",
        "        \n",
        "      H_hat, _ = Supervisor(e_hat)\n",
        "      H_hat = torch.reshape(H_hat, (batch_size, seq_len, hidden_dim))\n",
        "        \n",
        "      Y_fake = Discriminator(H_hat)\n",
        "      Y_fake = torch.reshape(Y_fake, (batch_size, seq_len, 1))\n",
        "        \n",
        "      x_hat, _ = Recovery(H_hat)\n",
        "      x_hat = torch.reshape(x_hat, (batch_size, seq_len, dim))\n",
        "        \n",
        "      H, _ = Embedder(X.float())\n",
        "      H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      H_hat_supervise, _ = Supervisor(H)\n",
        "      H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "      Generator.zero_grad()\n",
        "      Supervisor.zero_grad()\n",
        "      Discriminator.zero_grad()\n",
        "      Recovery.zero_grad()\n",
        "\n",
        "      # line 267 of original implementation: \n",
        "      # G_loss_U, G_loss_S, G_loss_V\n",
        "      # Supervised loss: one-step-ahead prediction in the latent space\n",
        "      G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "      print(G_loss_S)\n",
        "      print(Y_fake.shape)\n",
        "      # BUGFIX: PyTorch criteria take (input, target); the TF original uses\n",
        "      # (labels, logits). Passing the ones tensor as input is what made\n",
        "      # G_loss_U go negative in earlier runs.\n",
        "      G_loss_U = binary_cross_entropy_loss(Y_fake, torch.ones_like(Y_fake))\n",
        "      print(G_loss_U)\n",
        "      # Moment-matching losses; use unbiased=False on BOTH stds so the two\n",
        "      # sides of the comparison are computed the same way\n",
        "      G_loss_V1 = torch.mean(torch.abs((torch.std(x_hat, [0], unbiased = False)) + 1e-6 - (torch.std(X, [0], unbiased = False) + 1e-6)))\n",
        "      G_loss_V2 = torch.mean(torch.abs((torch.mean(x_hat, [0]) - (torch.mean(X, [0])))))\n",
        "      G_loss_V = G_loss_V1 + G_loss_V2\n",
        "      print(G_loss_V)\n",
        "      # doing a backward step for each loss should result in gradients accumulating \n",
        "      # so we should be able to optimize them jointly\n",
        "      G_loss_S.backward(retain_graph=True)#\n",
        "      G_loss_U.backward(retain_graph=True)\n",
        "      G_loss_V.backward(retain_graph=True)#\n",
        "\n",
        "\n",
        "      generator_optimizer.step()\n",
        "      supervisor_optimizer.step()\n",
        "      discriminator_optimizer.step()\n",
        "      "
      ],
      "id": "2ckDW187GK3i",
      "execution_count": 47,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "tensor(9.9414e-06, grad_fn=<MseLossBackward>)\n",
            "torch.Size([128, 24, 1])\n",
            "tensor(-4.8705, grad_fn=<BinaryCrossEntropyWithLogitsBackward>)\n",
            "tensor(0.7864, grad_fn=<AddBackward0>)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "63mn6lA8GuP7",
        "outputId": "2709f954-60ca-44fe-9d35-1e703e9937f5"
      },
      "source": [
        "# Inspect raw Discriminator output on generated latents. Per the recorded\n",
        "# output below it returns a tuple (sigmoid scores, final hidden state) and the\n",
        "# scores sit near 0.5 — NOT larger than 1 as previously noted here.\n",
        "Discriminator(H_hat)"
      ],
      "id": "63mn6lA8GuP7",
      "execution_count": 61,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(tensor([[0.4977],\n",
              "         [0.5011],\n",
              "         [0.5013],\n",
              "         ...,\n",
              "         [0.4923],\n",
              "         [0.4923],\n",
              "         [0.4923]], grad_fn=<SigmoidBackward>),\n",
              " tensor([[[ 0.3385, -0.2502, -0.0390,  ...,  0.0921, -0.1438,  0.1577],\n",
              "          [ 0.3385, -0.2502, -0.0390,  ...,  0.0921, -0.1438,  0.1577],\n",
              "          [ 0.3385, -0.2502, -0.0390,  ...,  0.0921, -0.1438,  0.1577],\n",
              "          ...,\n",
              "          [ 0.3385, -0.2502, -0.0390,  ...,  0.0921, -0.1438,  0.1577],\n",
              "          [ 0.3385, -0.2502, -0.0390,  ...,  0.0921, -0.1438,  0.1577],\n",
              "          [ 0.3385, -0.2502, -0.0390,  ...,  0.0921, -0.1438,  0.1577]],\n",
              " \n",
              "         [[-0.1385,  0.3385,  0.2934,  ..., -0.1475,  0.2490,  0.0824],\n",
              "          [-0.1385,  0.3385,  0.2934,  ..., -0.1475,  0.2490,  0.0824],\n",
              "          [-0.1385,  0.3385,  0.2934,  ..., -0.1475,  0.2490,  0.0824],\n",
              "          ...,\n",
              "          [-0.1385,  0.3385,  0.2934,  ..., -0.1475,  0.2490,  0.0824],\n",
              "          [-0.1385,  0.3385,  0.2934,  ..., -0.1475,  0.2490,  0.0824],\n",
              "          [-0.1385,  0.3385,  0.2934,  ..., -0.1475,  0.2490,  0.0824]],\n",
              " \n",
              "         [[-0.1396, -0.0889,  0.0376,  ..., -0.1524, -0.1783, -0.1427],\n",
              "          [-0.1396, -0.0889,  0.0376,  ..., -0.1524, -0.1783, -0.1427],\n",
              "          [-0.1396, -0.0889,  0.0376,  ..., -0.1524, -0.1783, -0.1427],\n",
              "          ...,\n",
              "          [-0.1396, -0.0889,  0.0376,  ..., -0.1524, -0.1783, -0.1427],\n",
              "          [-0.1396, -0.0889,  0.0376,  ..., -0.1524, -0.1783, -0.1427],\n",
              "          [-0.1396, -0.0889,  0.0376,  ..., -0.1524, -0.1783, -0.1427]]],\n",
              "        grad_fn=<StackBackward>))"
            ]
          },
          "metadata": {},
          "execution_count": 61
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "VuAD3bmIGQUq",
        "outputId": "6797b755-30ca-41d8-805e-2f0510e05037"
      },
      "source": [
        "# Re-instantiate the Discriminator: GRU stack with a 1-unit head and a\n",
        "# sigmoid activation, so its scores are bounded in (0, 1)\n",
        "Discriminator = Time_GAN_module(input_size=hidden_dim, output_size=1, hidden_dim=hidden_dim, n_layers=n_layers, activation=torch.sigmoid)\n",
        "Discriminator"
      ],
      "id": "VuAD3bmIGQUq",
      "execution_count": 60,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Time_GAN_module(\n",
              "  (rnn): GRU(24, 24, num_layers=3, batch_first=True)\n",
              "  (fc): Linear(in_features=24, out_features=1, bias=True)\n",
              ")"
            ]
          },
          "metadata": {},
          "execution_count": 60
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "T3PHABnV9mBa"
      },
      "source": [
        "# Sanity check: pull one batch from the DataLoader and show its shape\n",
        "test = next(iter(loader))\n",
        "test.shape"
      ],
      "id": "T3PHABnV9mBa",
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "PiPaUpnzsUnj",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "b4cb916e-8c47-442d-9965-688e8735373a"
      },
      "source": [
        "D_loss_real(Y_real, torch.ones_like(Y_real))"
      ],
      "id": "PiPaUpnzsUnj",
      "execution_count": 66,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "tensor(0.0023, grad_fn=<BinaryCrossEntropyWithLogitsBackward>)"
            ]
          },
          "metadata": {},
          "execution_count": 66
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "bede8319"
      },
      "source": [
        "# outline of original training loop\n",
        "# NOTE(review): kept as a reference string (it is displayed, not executed).\n",
        "# If revived, two things need fixing: (1) the BCE call keeps the TF\n",
        "# (labels, logits) argument order, which is reversed for PyTorch criteria;\n",
        "# (2) in the embedder section, zero_grad() is called AFTER backward() and\n",
        "# BEFORE step(), which wipes the gradients before the optimizers use them.\n",
        "\"\"\"print('Start Joint Training')\n",
        "\n",
        "for e in range(epoch): \n",
        "\n",
        "    for batch_index, X in enumerate(loader):\n",
        "        \n",
        "        random_data = random_generator(batch_size=batch_size, z_dim=dim, \n",
        "                                       T_mb=extract_time(data)[0], max_seq_len=extract_time(data)[1])\n",
        "        \n",
        "        \n",
        "        # Generator Training \n",
        "        ## Train Generator\n",
        "        z = torch.tensor(random_data)\n",
        "        z = z.float()\n",
        "        \n",
        "        e_hat, _ = Generator(z)\n",
        "        e_hat = torch.reshape(e_hat, (batch_size, seq_len, hidden_dim))\n",
        "        \n",
        "        H_hat, _ = Supervisor(e_hat)\n",
        "        H_hat = torch.reshape(H_hat, (batch_size, seq_len, hidden_dim))\n",
        "        \n",
        "        Y_fake = Discriminator(H_hat)\n",
        "        Y_fake = torch.reshape(Y_fake, (batch_size, seq_len, 1))\n",
        "        \n",
        "        x_hat, _ = Recovery(H_hat)\n",
        "        x_hat = torch.reshape(x_hat, (batch_size, seq_len, dim))\n",
        "        \n",
        "        \n",
        "        Generator.zero_grad()\n",
        "        Supervisor.zero_grad()\n",
        "        Discriminator.zero_grad()\n",
        "        Recovery.zero_grad()\n",
        "        \n",
        "        G_loss_U = binary_cross_entropy_loss(torch.ones_like(Y_fake), Y_fake)\n",
        "        \n",
        "        G_loss_V1 = torch.mean(torch.abs((torch.std(x_hat, [0], unbiased = False)) + 1e-6 - (torch.std(X, [0]) + 1e-6)))\n",
        "        G_loss_V2 = torch.mean(torch.abs((torch.mean(x_hat, [0]) - (torch.mean(X, [0])))))\n",
        "        G_loss_V = G_loss_V1 + G_loss_V2\n",
        "        \n",
        " \n",
        "        G_loss_U.backward(retain_graph=True)\n",
        "        G_loss_V.backward()\n",
        "\n",
        "\n",
        "        generator_optimizer.step()\n",
        "        supervisor_optimizer.step()\n",
        "        discriminator_optimizer.step()\n",
        "        \n",
        "        ## Train Embedder\n",
        "        \n",
        "        MSE_loss = nn.MSELoss()\n",
        "        \n",
        "        H, _ = Embedder(X.float())\n",
        "        H = torch.reshape(H, (batch_size, seq_len, hidden_dim))\n",
        "\n",
        "        X_tilde, _ = Recovery(H)\n",
        "        X_tilde = torch.reshape(X_tilde, (batch_size, seq_len, dim))\n",
        "\n",
        "        E_loss0 = 10 * torch.sqrt(MSE_loss(X, X_tilde))  \n",
        "        \n",
        "        H_hat_supervise, _ = Supervisor(H)\n",
        "        H_hat_supervise = torch.reshape(H_hat_supervise, (batch_size, seq_len, hidden_dim))  \n",
        "\n",
        "        G_loss_S = MSE_loss(H[:,1:,:], H_hat_supervise[:,:-1,:])\n",
        "        E_loss = E_loss0  + 0.1 * G_loss_S\n",
        "        \n",
        "        G_loss_S.backward(retain_graph=True)\n",
        "        E_loss.backward()\n",
        "        \n",
        "        Embedder.zero_grad()\n",
        "        Recovery.zero_grad()\n",
        "        Supervisor.zero_grad()\n",
        "        \n",
        "        embedder_optimizer.step()\n",
        "        recovery_optimizer.step()\n",
        "        supervisor_optimizer.step()\n",
        "        \n",
        "        # Train Discriminator \n",
        "        \n",
        "        \n",
        "        \n",
        "        \n",
        "        #if e in range(1,epoch) and batch_index == 0:\n",
        "        print('step: '+ str(e) + '/' + str(epoch) + ', G_loss_U: ' + str(G_loss_U.detach().numpy()) + ', G_loss_S: ' + \n",
        "             str(G_loss_S.detach().numpy()) + ', E_loss_t0: ' + str(np.sqrt(E_loss0.detach().numpy()))\n",
        "             )\n",
        "        \n",
        "        \n",
        " \n",
        "\n",
        "\n",
        "\n",
        "    \n",
        "\n",
        "\n",
        "        \n",
        "\n",
        "print('Finish Joint Training')\"\"\""
      ],
      "id": "bede8319",
      "execution_count": null,
      "outputs": []
    }
  ]
}
