{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import sys\n",
    "from sklearn.preprocessing import normalize"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the dataset: 42 columns per row (41 features + 1 label in the last column).\n",
    "# NOTE(review): hardcoded absolute Windows path — consider a configurable data directory.\n",
    "xy = np.loadtxt('C:\\\\Users\\\\SANHA\\\\Desktop\\\\new_data.csv', delimiter=',', dtype=np.float32)\n",
    "x_data=xy[:,0:-1]  # feature columns only\n",
    "x_data = normalize(x_data, axis=0, norm='max')  # scale each feature column by its max absolute value (sklearn 'max' norm)\n",
    "xy[:,0:-1]=x_data  # write the scaled features back into the full array\n",
    "y_data=xy[:,[-1]]  # labels, kept 2-D with shape (N, 1)\n",
    "m_data=xy[:499,:]  # first 499 rows; generated samples are appended to this later\n",
    "r_data=xy[:10000,:]  # first 10000 rows (sliced view of the real data)\n",
    "g_data=xy[:0,:]  # empty (0, 42) array used as an accumulator for generated rows\n",
    "F_data=xy[:,:]  # full dataset (features scaled, labels untouched)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model Inputs\n",
    "def model_inputs(real_dim, noise_dim):\n",
    "    \"\"\"Create the graph's two input placeholders.\n",
    "\n",
    "    Returns a (real-data, noise) pair of float32 placeholders with shapes\n",
    "    [None, real_dim] and [None, noise_dim] respectively.\n",
    "    \"\"\"\n",
    "    real_ph = tf.placeholder(tf.float32, shape=[None, real_dim], name='inputs_real')\n",
    "    noise_ph = tf.placeholder(tf.float32, shape=[None, noise_dim], name='inputs_z')\n",
    "    return real_ph, noise_ph\n",
    "\n",
    "def leaky_relu(x, alpha):\n",
    "    \"\"\"Element-wise leaky ReLU: x where x >= alpha * x, else alpha * x.\"\"\"\n",
    "    return tf.maximum(x, x * alpha)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generator Network\n",
    "def model_generator(z_input, out_dim, n_units=128, reuse=False, alpha=0.01):\n",
    "    \"\"\"Map a noise batch to a generated sample batch.\n",
    "\n",
    "    Architecture: dense(n_units) -> leaky ReLU -> dense(out_dim) -> sigmoid.\n",
    "    `reuse` shares the 'generator' variable scope across repeated calls.\n",
    "    Returns (sigmoid outputs, raw logits).\n",
    "    \"\"\"\n",
    "    with tf.variable_scope('generator', reuse=reuse):\n",
    "        hidden = leaky_relu(tf.layers.dense(z_input, n_units, activation=None), alpha)\n",
    "        logits = tf.layers.dense(hidden, out_dim, activation=None)\n",
    "        return tf.nn.sigmoid(logits), logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Discriminator Network\n",
    "def model_discriminator(input, n_units=128, reuse=False, alpha=0.1):\n",
    "    \"\"\"Score a batch of samples as real/fake; returns (sigmoid outputs, raw logits).\n",
    "\n",
    "    NOTE(review): `alpha` is currently unused — the leaky_relu call is commented\n",
    "    out, so the hidden layer uses a plain ReLU (unlike the generator, which uses\n",
    "    a leaky ReLU). Also, the parameter name `input` shadows the Python builtin.\n",
    "    \"\"\"\n",
    "    with tf.variable_scope('discriminator', reuse=reuse):\n",
    "        hidden_layer = tf.layers.dense(input, n_units, activation=tf.nn.relu)\n",
    "        #hidden_layer = leaky_relu(hidden_layer, alpha)\n",
    "        \n",
    "        logits = tf.layers.dense(hidden_layer, 1, activation=None)  # one real/fake score per sample\n",
    "        outputs = tf.nn.sigmoid(logits)\n",
    "        \n",
    "        return outputs, logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters\n",
    "input_size = 42      # width of one data row (41 features + 1 label column)\n",
    "z_dim = 21           # dimensionality of the generator's noise input\n",
    "g_hidden_size = 128  # units in the generator's hidden layer\n",
    "d_hidden_size = 128  # units in the discriminator's hidden layer\n",
    "alpha = 0.1          # leaky-ReLU slope passed to both networks\n",
    "smooth = 0.1         # one-sided label smoothing for the real-label loss\n",
    "learning_rate = 0.001"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "tf.reset_default_graph()  # If we don't have this, as we call this block over and over, the graph gets bigger and bigger\n",
    "# NOTE(review): the reset is redundant here because an explicit Graph is built\n",
    "# below, but it is harmless.\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "    # Input placeholders: real data rows and generator noise.\n",
    "    inputs_real, inputs_z = model_inputs(input_size, z_dim)\n",
    "    \n",
    "    # Generator maps noise (z_dim) to a full data row (input_size).\n",
    "    g_outputs, g_logits = model_generator(inputs_z, input_size, n_units=g_hidden_size, reuse=False, alpha=alpha)\n",
    "    \n",
    "    # Discriminator scores real and generated batches; reuse=True on the second\n",
    "    # call shares a single set of discriminator weights.\n",
    "    d_outputs_real, d_logits_real = model_discriminator(inputs_real, n_units=d_hidden_size, reuse=False, alpha=alpha)\n",
    "    d_outputs_fake, d_logits_fake = model_discriminator(g_outputs, n_units=d_hidden_size, reuse=True, alpha=alpha)\n",
    "    \n",
    "    # Discriminator targets: real rows get label (1 - smooth) (one-sided label\n",
    "    # smoothing), generated rows get label 0.\n",
    "    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1-smooth)))\n",
    "    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))\n",
    "    \n",
    "    d_loss = d_loss_real + d_loss_fake\n",
    "    # Generator loss: push the discriminator toward outputting 1 on generated rows.\n",
    "    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))\n",
    "    \n",
    "    # Partition trainable variables by scope name so each optimizer updates only\n",
    "    # its own network.\n",
    "    t_vars = tf.trainable_variables()\n",
    "    g_vars = [variable for variable in t_vars if 'generator' in variable.name]\n",
    "    d_vars = [variable for variable in t_vars if 'discriminator' in variable.name]\n",
    "    \n",
    "    # Affected Variables with var_list\n",
    "    d_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(d_loss, var_list=d_vars)\n",
    "    g_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(g_loss, var_list=g_vars)\n",
    "    \n",
    "    # Saving variables with var_list\n",
    "    # NOTE(review): only generator variables are saved; the discriminator is not\n",
    "    # checkpointed.\n",
    "    saver = tf.train.Saver(var_list=g_vars)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 / 100000: 1.054104, 1.193582Generating Complete. normal=1, abnormal=0\n"
     ]
    }
   ],
   "source": [
    "samples=[]   # NOTE(review): unused collector list\n",
    "normal=0     # generated rows whose thresholded label is 0\n",
    "abnormal=0   # generated rows whose thresholded label is 1\n",
    "\n",
    "with tf.Session(graph=graph) as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    # NOTE(review): hardcoded absolute path; mode 'a+' appends across re-runs.\n",
    "    f = open('C:\\\\Users\\\\SANHA\\\\Desktop\\\\gen_sample3333.txt', 'a+')\n",
    "    for step in range(1):  # NOTE(review): progress line says \"/ 100000\" but only 1 step runs\n",
    "        batch_images = F_data[step].reshape([1, 42])  # one real row as a batch of 1\n",
    "        batch_z = np.random.uniform(-1, 1, size=[1, z_dim])\n",
    "        \n",
    "        # One discriminator step, then one generator step on the same noise batch.\n",
    "        _ = sess.run(d_optimizer, feed_dict={inputs_real : batch_images, inputs_z : batch_z})\n",
    "        _ = sess.run(g_optimizer, feed_dict={inputs_z : batch_z})\n",
    "        loss_d, loss_g = sess.run([d_loss, g_loss], feed_dict={inputs_real : batch_images, inputs_z : batch_z})\n",
    "        sys.stdout.write(\"\\r%d / %d: %f, %f\" % (step, 100000, loss_d, loss_g))\n",
    "        sys.stdout.flush()\n",
    "        \n",
    "        # Draw one generated sample.\n",
    "        # NOTE(review): calling model_generator inside the loop adds new ops to the\n",
    "        # graph on every iteration; the sampling op should be built once, outside.\n",
    "        sample_z = np.random.uniform(-1, 1, size=[1, z_dim])\n",
    "        gen_samples, _ = sess.run(model_generator(inputs_z, input_size, reuse=True), feed_dict={inputs_z : sample_z})\n",
    "        \n",
    "        # Threshold the label column (index 41) to a hard 0/1 decision.\n",
    "        temp=gen_samples[0,41]\n",
    "        if temp>=0.5:\n",
    "            gen_samples[0,41]=1\n",
    "            abnormal+=1\n",
    "        else :\n",
    "            gen_samples[0,41]=0\n",
    "            normal+=1\n",
    "        # Write the generated row as 42 space-separated floats (byte-identical\n",
    "        # output to the old 42-argument format string, without spelling out each\n",
    "        # argument by hand).\n",
    "        f.write(\" \".join(\"%f\" % v for v in gen_samples[0]) + \" \\n\")\n",
    "        \n",
    "        m_data=np.append(m_data,gen_samples,axis=0)  # real[:499] + generated rows\n",
    "        g_data=np.append(g_data,gen_samples,axis=0)  # generated rows only\n",
    "        \n",
    "    print('Generating Complete. normal={}, abnormal={}'.format(normal,abnormal))\n",
    "    f.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step:     0\tLoss: 0.851\tAcc: 52.35%\n",
      "step:    10\tLoss: 0.233\tAcc: 90.19%\n",
      "step:    20\tLoss: 0.215\tAcc: 91.28%\n",
      "step:    30\tLoss: 0.215\tAcc: 91.31%\n",
      "step:    40\tLoss: 0.217\tAcc: 91.40%\n",
      "step:    50\tLoss: 0.211\tAcc: 91.77%\n",
      "step:    60\tLoss: 0.217\tAcc: 91.82%\n",
      "step:    70\tLoss: 0.221\tAcc: 91.88%\n",
      "step:    80\tLoss: 0.227\tAcc: 91.85%\n",
      "step:    90\tLoss: 0.230\tAcc: 91.87%\n",
      "step:   100\tLoss: 0.234\tAcc: 91.81%\n",
      "step:   110\tLoss: 0.238\tAcc: 91.77%\n",
      "step:   120\tLoss: 0.242\tAcc: 91.78%\n",
      "step:   130\tLoss: 0.247\tAcc: 91.80%\n",
      "step:   140\tLoss: 0.252\tAcc: 91.79%\n",
      "step:   150\tLoss: 0.257\tAcc: 91.72%\n",
      "step:   160\tLoss: 0.263\tAcc: 91.67%\n",
      "step:   170\tLoss: 0.269\tAcc: 91.54%\n",
      "step:   180\tLoss: 0.275\tAcc: 91.35%\n",
      "step:   190\tLoss: 0.282\tAcc: 91.24%\n",
      "step:   200\tLoss: 0.288\tAcc: 91.09%\n",
      "step:   210\tLoss: 0.295\tAcc: 90.96%\n",
      "step:   220\tLoss: 0.302\tAcc: 90.88%\n",
      "step:   230\tLoss: 0.308\tAcc: 90.85%\n",
      "step:   240\tLoss: 0.315\tAcc: 90.73%\n",
      "step:   250\tLoss: 0.323\tAcc: 90.59%\n",
      "step:   260\tLoss: 0.330\tAcc: 90.48%\n",
      "step:   270\tLoss: 0.338\tAcc: 90.38%\n",
      "step:   280\tLoss: 0.345\tAcc: 90.36%\n",
      "step:   290\tLoss: 0.353\tAcc: 90.29%\n",
      "step:   300\tLoss: 0.362\tAcc: 90.15%\n",
      "step:   310\tLoss: 0.372\tAcc: 90.06%\n",
      "step:   320\tLoss: 0.384\tAcc: 89.99%\n",
      "step:   330\tLoss: 0.395\tAcc: 89.85%\n",
      "step:   340\tLoss: 0.408\tAcc: 89.71%\n",
      "step:   350\tLoss: 0.419\tAcc: 89.76%\n",
      "step:   360\tLoss: 0.428\tAcc: 89.68%\n",
      "step:   370\tLoss: 0.437\tAcc: 89.57%\n",
      "step:   380\tLoss: 0.446\tAcc: 89.63%\n",
      "step:   390\tLoss: 0.455\tAcc: 89.47%\n",
      "step:   400\tLoss: 0.462\tAcc: 89.52%\n",
      "step:   410\tLoss: 0.469\tAcc: 89.43%\n",
      "step:   420\tLoss: 0.475\tAcc: 89.30%\n",
      "step:   430\tLoss: 0.480\tAcc: 89.39%\n",
      "step:   440\tLoss: 0.486\tAcc: 89.31%\n",
      "step:   450\tLoss: 0.492\tAcc: 89.19%\n",
      "step:   460\tLoss: 0.497\tAcc: 89.24%\n",
      "step:   470\tLoss: 0.502\tAcc: 89.23%\n",
      "step:   480\tLoss: 0.509\tAcc: 89.10%\n",
      "step:   490\tLoss: 0.514\tAcc: 89.08%\n",
      "step:   500\tLoss: 0.520\tAcc: 89.04%\n",
      "step:   510\tLoss: 0.525\tAcc: 89.10%\n",
      "step:   520\tLoss: 0.529\tAcc: 89.13%\n",
      "step:   530\tLoss: 0.535\tAcc: 89.03%\n",
      "step:   540\tLoss: 0.541\tAcc: 88.95%\n",
      "step:   550\tLoss: 0.546\tAcc: 88.94%\n",
      "step:   560\tLoss: 0.550\tAcc: 88.92%\n",
      "step:   570\tLoss: 0.556\tAcc: 88.87%\n",
      "step:   580\tLoss: 0.561\tAcc: 88.86%\n",
      "step:   590\tLoss: 0.566\tAcc: 88.84%\n",
      "step:   600\tLoss: 0.572\tAcc: 88.78%\n",
      "step:   610\tLoss: 0.578\tAcc: 88.67%\n",
      "step:   620\tLoss: 0.584\tAcc: 88.57%\n",
      "step:   630\tLoss: 0.588\tAcc: 88.62%\n",
      "step:   640\tLoss: 0.592\tAcc: 88.66%\n",
      "step:   650\tLoss: 0.598\tAcc: 88.55%\n",
      "step:   660\tLoss: 0.604\tAcc: 88.51%\n",
      "step:   670\tLoss: 0.609\tAcc: 88.53%\n",
      "step:   680\tLoss: 0.615\tAcc: 88.52%\n",
      "step:   690\tLoss: 0.621\tAcc: 88.51%\n",
      "step:   700\tLoss: 0.627\tAcc: 88.50%\n",
      "step:   710\tLoss: 0.633\tAcc: 88.48%\n",
      "step:   720\tLoss: 0.640\tAcc: 88.37%\n",
      "step:   730\tLoss: 0.643\tAcc: 88.48%\n",
      "step:   740\tLoss: 0.651\tAcc: 88.32%\n",
      "step:   750\tLoss: 0.655\tAcc: 88.37%\n",
      "step:   760\tLoss: 0.659\tAcc: 88.40%\n",
      "step:   770\tLoss: 0.665\tAcc: 88.38%\n",
      "step:   780\tLoss: 0.671\tAcc: 88.32%\n",
      "step:   790\tLoss: 0.676\tAcc: 88.33%\n",
      "step:   800\tLoss: 0.681\tAcc: 88.30%\n",
      "step:   810\tLoss: 0.686\tAcc: 88.28%\n",
      "step:   820\tLoss: 0.691\tAcc: 88.28%\n",
      "step:   830\tLoss: 0.697\tAcc: 88.26%\n",
      "step:   840\tLoss: 0.702\tAcc: 88.28%\n",
      "step:   850\tLoss: 0.707\tAcc: 88.24%\n",
      "step:   860\tLoss: 0.712\tAcc: 88.25%\n",
      "step:   870\tLoss: 0.716\tAcc: 88.31%\n",
      "step:   880\tLoss: 0.724\tAcc: 88.09%\n",
      "step:   890\tLoss: 0.730\tAcc: 88.03%\n",
      "step:   900\tLoss: 0.731\tAcc: 88.18%\n",
      "step:   910\tLoss: 0.733\tAcc: 88.27%\n",
      "step:   920\tLoss: 0.738\tAcc: 88.24%\n",
      "step:   930\tLoss: 0.743\tAcc: 88.25%\n",
      "step:   940\tLoss: 0.747\tAcc: 88.22%\n",
      "step:   950\tLoss: 0.752\tAcc: 88.22%\n",
      "step:   960\tLoss: 0.756\tAcc: 88.19%\n",
      "step:   970\tLoss: 0.760\tAcc: 88.21%\n",
      "step:   980\tLoss: 0.765\tAcc: 88.22%\n",
      "step:   990\tLoss: 0.769\tAcc: 88.23%\n",
      "true=19958 false: 2667 acc: 0.88\n"
     ]
    }
   ],
   "source": [
    "# Train a 2-hidden-layer softmax classifier on the real data (x_data/y_data)\n",
    "# and evaluate it on the generated data loaded below.\n",
    "xy2 = np.loadtxt('C:\\\\Users\\\\SANHA\\\\Desktop\\\\gen_data.csv', delimiter=',', dtype=np.float32)\n",
    "gx_data=xy2[:,0:-1]  # evaluation features (41 columns)\n",
    "gy_data=xy2[:,[-1]]  # evaluation labels, shape (N, 1)\n",
    "gF_data=xy2[:,:]\n",
    "\n",
    "\n",
    "# NOTE(review): the six slices below are computed but never used in this cell.\n",
    "x1_data=m_data[:,0:-1]\n",
    "y1_data=m_data[:,[-1]]\n",
    "x2_data=r_data[:,0:-1]\n",
    "y2_data=r_data[:,[-1]]\n",
    "\n",
    "\n",
    "x3_data=g_data[:,0:-1]\n",
    "y3_data=g_data[:,[-1]]\n",
    "\n",
    "\n",
    "nb_classes=2  # binary: normal (0) vs abnormal (1)\n",
    "\n",
    "X=tf.placeholder(tf.float32,[None,41])  # 41 features per row\n",
    "Y=tf.placeholder(tf.int32,[None,1])     # integer class label\n",
    "\n",
    "# One-hot encode the labels: (N, 1) -> (N, 1, 2) -> (N, 2).\n",
    "Y_one_hot=tf.one_hot(Y,nb_classes)\n",
    "Y_one_hot=tf.reshape(Y_one_hot,[-1,nb_classes])\n",
    "\n",
    "# 41 -> 82 -> 42 -> 2 fully-connected network with sigmoid hidden activations.\n",
    "W1=tf.Variable(tf.random_normal([41,82]),name='weight1')\n",
    "b1=tf.Variable(tf.random_normal([82]),name='bias1')\n",
    "layer1=tf.sigmoid(tf.matmul(X,W1)+b1)\n",
    "\n",
    "W2=tf.Variable(tf.random_normal([82,42]),name='weight2')\n",
    "b2=tf.Variable(tf.random_normal([42]),name='bias2')\n",
    "layer2=tf.sigmoid(tf.matmul(layer1,W2)+b2)\n",
    "\n",
    "W3=tf.Variable(tf.random_normal([42,nb_classes]),name='weight3')\n",
    "b3=tf.Variable(tf.random_normal([nb_classes]),name='bias3')\n",
    "logits=tf.matmul(layer2,W3)+b3\n",
    "hypothesis=tf.nn.softmax(logits)\n",
    "\n",
    "cost_i=tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=Y_one_hot)\n",
    "\n",
    "cost=tf.reduce_mean(cost_i)\n",
    "optimizer=tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)\n",
    "\n",
    "prediction=tf.argmax(hypothesis,1)  # predicted class index per row\n",
    "# tf.argmax replaces the deprecated tf.arg_max (removed in later TF releases);\n",
    "# behavior is identical.\n",
    "correct_prediction=tf.equal(prediction,tf.argmax(Y_one_hot,1))\n",
    "accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    \n",
    "    # Train on the real data; periodically report loss/accuracy on the generated data.\n",
    "    for step in range(1000):\n",
    "        sess.run(optimizer,feed_dict={X:x_data,Y:y_data})\n",
    "        if step %10==0:\n",
    "            loss,acc=sess.run([cost,accuracy],feed_dict={X:gx_data,Y:gy_data})\n",
    "            print(\"step: {:5}\\tLoss: {:.3f}\\tAcc: {:.2%}\".format(step,loss,acc))\n",
    "  \n",
    "    # Final evaluation: count correct vs incorrect predictions on the generated set.\n",
    "    tr=0\n",
    "    fa=0\n",
    "    total=0  # NOTE(review): unused\n",
    "\n",
    "\n",
    "    pred = sess.run(prediction, feed_dict={X: gx_data})\n",
    "    for p, y in zip(pred, gy_data.flatten()):\n",
    "            if(p==int(y)):\n",
    "                tr=tr+1\n",
    "            else:\n",
    "                fa=fa+1\n",
    "\n",
    "    print(\"true={} false: {} acc: {:0.2f}\".format(tr,fa,tr/(tr+fa)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
