{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "'''\n",
    "linear regression experiment, hope you can know:\n",
    "1. how to design the learning model\n",
    "2. optimize the model\n",
    "3. dealing with the dataset\n",
    "\n",
    "Original Author: Aymeric Damien\n",
    "Edited by Wei Li for ChinaHadoop Deep learning course\n",
    "Project: https://github.com/aymericdamien/TensorFlow-Examples/\n",
    "'''\n",
    "\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy\n",
    "rng = numpy.random\n",
    "\n",
    "# model params\n",
    "learning_rate = 0.02\n",
    "training_epochs = 3000\n",
    "display_step=50\n",
    "# \n",
    "train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,\n",
    "                         7.042,10.791,5.313,7.997,5.654,9.27,3.1])\n",
    "train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,\n",
    "                         2.827,3.465,1.65,2.904,2.42,2.94,1.3])\n",
    "n_samples = train_X.shape[0]\n",
    "\n",
    "# tf Graph Input\n",
    "X = tf.placeholder(\"float\")\n",
    "Y = tf.placeholder(\"float\")\n",
    "\n",
    "# Set model weights\n",
    "W = tf.Variable(rng.randn(), name=\"weight\")\n",
    "b = tf.Variable(rng.randn(), name=\"bias\")\n",
    "\n",
    "# Construct a linear model\n",
    "pred = tf.add(tf.multiply(X, W), b)\n",
    "\n",
    "# Mean squared error\n",
    "cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)\n",
    "# Gradient descent\n",
    "optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n",
    "\n",
    "# Initializing the variables\n",
    "init = tf.global_variables_initializer()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('Epoch:', '0050', 'cost=', '0.178423569', 'W=', 0.42291793, 'b=', -0.4734658)\n",
      "('Epoch:', '0100', 'cost=', '0.156202286', 'W=', 0.40251526, 'b=', -0.32475927)\n",
      "('Epoch:', '0150', 'cost=', '0.138855815', 'W=', 0.38448787, 'b=', -0.19336548)\n",
      "('Epoch:', '0200', 'cost=', '0.125314981', 'W=', 0.36855927, 'b=', -0.077268951)\n",
      "('Epoch:', '0250', 'cost=', '0.114744954', 'W=', 0.35448512, 'b=', 0.025311502)\n",
      "('Epoch:', '0300', 'cost=', '0.106494129', 'W=', 0.34204948, 'b=', 0.11594942)\n",
      "('Epoch:', '0350', 'cost=', '0.100053802', 'W=', 0.3310616, 'b=', 0.19603507)\n",
      "('Epoch:', '0400', 'cost=', '0.095026731', 'W=', 0.32135299, 'b=', 0.26679745)\n",
      "('Epoch:', '0450', 'cost=', '0.091103002', 'W=', 0.31277463, 'b=', 0.3293213)\n",
      "('Epoch:', '0500', 'cost=', '0.088040523', 'W=', 0.30519509, 'b=', 0.38456526)\n",
      "('Epoch:', '0550', 'cost=', '0.085650302', 'W=', 0.29849792, 'b=', 0.43337804)\n",
      "('Epoch:', '0600', 'cost=', '0.083784848', 'W=', 0.29258049, 'b=', 0.47650799)\n",
      "('Epoch:', '0650', 'cost=', '0.082329050', 'W=', 0.28735185, 'b=', 0.51461679)\n",
      "('Epoch:', '0700', 'cost=', '0.081192940', 'W=', 0.28273201, 'b=', 0.54828918)\n",
      "('Epoch:', '0750', 'cost=', '0.080306433', 'W=', 0.27865005, 'b=', 0.57804072)\n",
      "('Epoch:', '0800', 'cost=', '0.079614699', 'W=', 0.27504328, 'b=', 0.60432887)\n",
      "('Epoch:', '0850', 'cost=', '0.079074971', 'W=', 0.27185646, 'b=', 0.6275565)\n",
      "('Epoch:', '0900', 'cost=', '0.078653932', 'W=', 0.26904064, 'b=', 0.64807951)\n",
      "('Epoch:', '0950', 'cost=', '0.078325450', 'W=', 0.26655263, 'b=', 0.66621393)\n",
      "('Epoch:', '1000', 'cost=', '0.078069247', 'W=', 0.26435426, 'b=', 0.68223649)\n",
      "('Epoch:', '1050', 'cost=', '0.077869445', 'W=', 0.26241186, 'b=', 0.69639373)\n",
      "('Epoch:', '1100', 'cost=', '0.077713616', 'W=', 0.26069549, 'b=', 0.70890343)\n",
      "('Epoch:', '1150', 'cost=', '0.077592134', 'W=', 0.25917912, 'b=', 0.71995574)\n",
      "('Epoch:', '1200', 'cost=', '0.077497423', 'W=', 0.2578392, 'b=', 0.72972184)\n",
      "('Epoch:', '1250', 'cost=', '0.077423617', 'W=', 0.25665545, 'b=', 0.73834991)\n",
      "('Epoch:', '1300', 'cost=', '0.077366099', 'W=', 0.2556093, 'b=', 0.74597466)\n",
      "('Epoch:', '1350', 'cost=', '0.077321291', 'W=', 0.25468507, 'b=', 0.75271124)\n",
      "('Epoch:', '1400', 'cost=', '0.077286400', 'W=', 0.25386831, 'b=', 0.75866407)\n",
      "('Epoch:', '1450', 'cost=', '0.077259235', 'W=', 0.25314665, 'b=', 0.76392406)\n",
      "('Epoch:', '1500', 'cost=', '0.077238098', 'W=', 0.252509, 'b=', 0.76857102)\n",
      "('Epoch:', '1550', 'cost=', '0.077221632', 'W=', 0.25194564, 'b=', 0.77267736)\n",
      "('Epoch:', '1600', 'cost=', '0.077208854', 'W=', 0.25144795, 'b=', 0.77630514)\n",
      "('Epoch:', '1650', 'cost=', '0.077198923', 'W=', 0.25100803, 'b=', 0.77951139)\n",
      "('Epoch:', '1700', 'cost=', '0.077191189', 'W=', 0.25061971, 'b=', 0.78234196)\n",
      "('Epoch:', '1750', 'cost=', '0.077185199', 'W=', 0.25027612, 'b=', 0.78484607)\n",
      "('Epoch:', '1800', 'cost=', '0.077180564', 'W=', 0.24997255, 'b=', 0.78705853)\n",
      "('Epoch:', '1850', 'cost=', '0.077176966', 'W=', 0.2497045, 'b=', 0.78901207)\n",
      "('Epoch:', '1900', 'cost=', '0.077174187', 'W=', 0.24946776, 'b=', 0.79073763)\n",
      "('Epoch:', '1950', 'cost=', '0.077172041', 'W=', 0.24925858, 'b=', 0.79226238)\n",
      "('Epoch:', '2000', 'cost=', '0.077170387', 'W=', 0.24907368, 'b=', 0.7936098)\n",
      "('Epoch:', '2050', 'cost=', '0.077169113', 'W=', 0.24891038, 'b=', 0.79480028)\n",
      "('Epoch:', '2100', 'cost=', '0.077168114', 'W=', 0.24876596, 'b=', 0.79585338)\n",
      "('Epoch:', '2150', 'cost=', '0.077167362', 'W=', 0.24863829, 'b=', 0.79678357)\n",
      "('Epoch:', '2200', 'cost=', '0.077166796', 'W=', 0.24852541, 'b=', 0.79760629)\n",
      "('Epoch:', '2250', 'cost=', '0.077166334', 'W=', 0.24842578, 'b=', 0.79833227)\n",
      "('Epoch:', '2300', 'cost=', '0.077165999', 'W=', 0.2483376, 'b=', 0.79897529)\n",
      "('Epoch:', '2350', 'cost=', '0.077165760', 'W=', 0.24825987, 'b=', 0.79954147)\n",
      "('Epoch:', '2400', 'cost=', '0.077165581', 'W=', 0.24819092, 'b=', 0.80004394)\n",
      "('Epoch:', '2450', 'cost=', '0.077165432', 'W=', 0.24813022, 'b=', 0.80048668)\n",
      "('Epoch:', '2500', 'cost=', '0.077165321', 'W=', 0.24807698, 'b=', 0.80087441)\n",
      "('Epoch:', '2550', 'cost=', '0.077165253', 'W=', 0.24802969, 'b=', 0.80121905)\n",
      "('Epoch:', '2600', 'cost=', '0.077165186', 'W=', 0.24798796, 'b=', 0.80152339)\n",
      "('Epoch:', '2650', 'cost=', '0.077165157', 'W=', 0.2479513, 'b=', 0.8017906)\n",
      "('Epoch:', '2700', 'cost=', '0.077165119', 'W=', 0.24791868, 'b=', 0.80202842)\n",
      "('Epoch:', '2750', 'cost=', '0.077165097', 'W=', 0.24789007, 'b=', 0.80223686)\n",
      "('Epoch:', '2800', 'cost=', '0.077165097', 'W=', 0.24786451, 'b=', 0.80242288)\n",
      "('Epoch:', '2850', 'cost=', '0.077165082', 'W=', 0.24784194, 'b=', 0.80258781)\n",
      "('Epoch:', '2900', 'cost=', '0.077165082', 'W=', 0.24782193, 'b=', 0.80273348)\n",
      "('Epoch:', '2950', 'cost=', '0.077165082', 'W=', 0.24780463, 'b=', 0.80285954)\n",
      "('Epoch:', '3000', 'cost=', '0.077165082', 'W=', 0.24778947, 'b=', 0.80296975)\n",
      "('Training cost=', 0.077165082, 'W=', 0.24778947, 'b=', 0.80296975, '\\n')\n",
       "Testing...\n",
      "('Test LOSS=', 0.079976395)\n",
      "('Final Loss:', 0.0028113127)\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# Launch the graph\n",
    "with tf.Session() as sess:\n",
    "    sess.run(init)\n",
    "\n",
    "    # Fit all training data\n",
    "    for epoch in range(training_epochs):\n",
    "        for (x, y) in zip(train_X, train_Y):\n",
    "            sess.run(optimizer, feed_dict={X: x, Y: y})\n",
    "\n",
    "        # Display logs per epoch step\n",
    "        if (epoch+1) % display_step == 0:\n",
    "            c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})\n",
    "            print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(c), \\\n",
    "                \"W=\", sess.run(W), \"b=\", sess.run(b))\n",
    "\n",
    "\n",
    "    training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})\n",
    "    print(\"Training cost=\", training_cost, \"W=\", sess.run(W), \"b=\", sess.run(b), '\\n')\n",
    "\n",
    " \n",
    "\n",
    "    # the testing data\n",
    "    test_X = numpy.asarray([6.83, 4.668, 8.9, 7.91, 5.7, 8.7, 3.1, 2.1])\n",
    "    test_Y = numpy.asarray([1.84, 2.273, 3.2, 2.831, 2.92, 3.24, 1.35, 1.03])\n",
    "\n",
    "    print(\"Tssting...\")\n",
    "    testing_cost = sess.run(\n",
    "        tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),\n",
    "        feed_dict={X: test_X, Y: test_Y})  # same function as cost above\n",
    "    print(\"Test LOSS=\", testing_cost)\n",
    "    print(\"Final Loss:\", abs(\n",
    "        training_cost - testing_cost))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
