{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train loss: 2.2986374526207265\n",
      "=== epoch:  1 , train acc:  0.234 , test acc:  0.235 time:  1.7198739051818848 ===\n",
      "train loss: 2.2979865872132903\n",
      "train loss: 2.2948183454675046\n",
      "train loss: 2.2856921518529707\n",
      "train loss: 2.278742238234237\n",
      "train loss: 2.2638460923680075\n",
      "train loss: 2.2517893133016282\n",
      "train loss: 2.2562353111054403\n",
      "train loss: 2.2291030308198683\n",
      "train loss: 2.1725141405476345\n",
      "train loss: 2.157558636247948\n",
      "train loss: 2.1331412214706464\n",
      "train loss: 2.063412456099391\n",
      "train loss: 2.0629151696607178\n",
      "train loss: 1.99542446713821\n",
      "train loss: 1.912896502013887\n",
      "train loss: 1.8197213629593147\n",
      "train loss: 1.7842771806769568\n",
      "train loss: 1.624400815554425\n",
      "train loss: 1.5533374636291466\n",
      "train loss: 1.5185391470609346\n",
      "train loss: 1.3841627788873678\n",
      "train loss: 1.314253649575218\n",
      "train loss: 1.3111816897847979\n",
      "train loss: 1.2860267623473154\n",
      "train loss: 1.0550216158636232\n",
      "train loss: 1.1117993887697641\n",
      "train loss: 1.097359808598483\n",
      "train loss: 1.0336609529701513\n",
      "train loss: 1.018602934341857\n",
      "train loss: 0.8324310201209082\n",
      "train loss: 0.8516074323830979\n",
      "train loss: 0.8055337809504405\n",
      "train loss: 0.6991891979947745\n",
      "train loss: 0.7251719208048705\n",
      "train loss: 0.7286899046861686\n",
      "train loss: 0.8531088010242032\n",
      "train loss: 0.623701907032628\n",
      "train loss: 0.6942576766099235\n",
      "train loss: 0.7274445547289099\n",
      "train loss: 0.662199532317522\n",
      "train loss: 0.6645073559685913\n",
      "train loss: 0.7698155054874438\n",
      "train loss: 0.7242465845196139\n",
      "train loss: 0.4797895279878969\n",
      "train loss: 0.5802536405765656\n",
      "train loss: 0.6343965906895622\n",
      "train loss: 0.5188295546902427\n",
      "train loss: 0.5512516446309477\n",
      "train loss: 0.653021058685837\n",
      "train loss: 0.5312714495441864\n",
      "train loss: 0.5870098421967174\n",
      "train loss: 0.5993848007148853\n",
      "train loss: 0.5402536937671931\n",
      "train loss: 0.4773317232134023\n",
      "train loss: 0.3693760502276153\n",
      "train loss: 0.3307014518634979\n",
      "train loss: 0.444226113694121\n",
      "train loss: 0.41979996615038945\n",
      "train loss: 0.4324041266721201\n",
      "train loss: 0.537761072019554\n",
      "train loss: 0.6953368460081011\n",
      "train loss: 0.5091466666016937\n",
      "train loss: 0.40313610933377936\n",
      "train loss: 0.3809695391468496\n",
      "train loss: 0.44113489827115254\n",
      "train loss: 0.5269165165777148\n",
      "train loss: 0.552338664571905\n",
      "train loss: 0.43867101606741565\n",
      "train loss: 0.4679240453960306\n",
      "train loss: 0.43287280141097534\n",
      "train loss: 0.42525495303403216\n",
      "train loss: 0.4820121209053302\n",
      "train loss: 0.3729561617968747\n",
      "train loss: 0.4100655547191401\n",
      "train loss: 0.5328102122641621\n",
      "train loss: 0.5133501050779334\n",
      "train loss: 0.32148895191141313\n",
      "train loss: 0.617398038581181\n",
      "train loss: 0.3719371503042596\n",
      "train loss: 0.40247265056659837\n",
      "train loss: 0.3370907103194063\n",
      "train loss: 0.6098432482095903\n",
      "train loss: 0.4253234023468772\n",
      "train loss: 0.5564344940237792\n",
      "train loss: 0.3874325084731513\n",
      "train loss: 0.531811603643628\n",
      "train loss: 0.4666713825419659\n",
      "train loss: 0.42438258972172216\n",
      "train loss: 0.3862676046932023\n",
      "train loss: 0.35493618889046424\n",
      "train loss: 0.4069176826066693\n",
      "train loss: 0.34305199593780555\n",
      "train loss: 0.43721968294838875\n",
      "train loss: 0.2865012772816335\n",
      "train loss: 0.367302739672715\n",
      "train loss: 0.3847782022019676\n",
      "train loss: 0.320219634233951\n",
      "train loss: 0.3500763693314734\n",
      "train loss: 0.45089188491517396\n",
      "train loss: 0.21583916056471383\n",
      "train loss: 0.3787625091699118\n",
      "train loss: 0.4529519786890235\n",
      "train loss: 0.4956583229633806\n",
      "train loss: 0.33091577745050027\n",
      "train loss: 0.18959514596587274\n",
      "train loss: 0.3333777971666553\n",
      "train loss: 0.49024921224124485\n",
      "train loss: 0.3156452620832599\n",
      "train loss: 0.39093780598661404\n",
      "train loss: 0.352886884543508\n",
      "train loss: 0.2532081947960958\n",
      "train loss: 0.3992356354031539\n",
      "train loss: 0.29785263830840086\n",
      "train loss: 0.3839354321951044\n",
      "train loss: 0.3539339749697561\n",
      "train loss: 0.23923793215770334\n",
      "train loss: 0.3949320605614543\n",
      "train loss: 0.2270125095417271\n",
      "train loss: 0.24571844281792288\n",
      "train loss: 0.32322927258106077\n",
      "=== epoch:  2 , train acc:  0.896 , test acc:  0.891 time:  27.97971796989441 ===\n",
      "train loss: 0.35868309595777825\n",
      "train loss: 0.29375516766684107\n",
      "train loss: 0.35839752793318647\n",
      "train loss: 0.389460628047016\n",
      "train loss: 0.49139839722447093\n",
      "train loss: 0.362976040875444\n",
      "train loss: 0.23662507779352768\n",
      "train loss: 0.3391214913637001\n",
      "train loss: 0.3943784691826431\n",
      "train loss: 0.31469785026203867\n",
      "train loss: 0.33133190818583974\n",
      "train loss: 0.3602178403768583\n",
      "train loss: 0.4009532359800708\n",
      "train loss: 0.26356603606830287\n",
      "train loss: 0.2577187242358582\n",
      "train loss: 0.3538679471658419\n",
      "train loss: 0.4253951992892543\n",
      "train loss: 0.26383869506422925\n",
      "train loss: 0.3399620125863683\n",
      "train loss: 0.35234286933565934\n",
      "train loss: 0.3508372429052759\n",
      "train loss: 0.26121444127667987\n",
      "train loss: 0.3253361407882586\n",
      "train loss: 0.22893365766815435\n",
      "train loss: 0.39825947807923695\n",
      "train loss: 0.21657488020177248\n",
      "train loss: 0.3389745805508278\n",
      "train loss: 0.3558712552620465\n",
      "train loss: 0.24469967296636508\n",
      "train loss: 0.2752143753308864\n",
      "train loss: 0.30661360866932347\n",
      "train loss: 0.24673797508738157\n",
      "train loss: 0.19499601100725938\n",
      "train loss: 0.3034718948156583\n",
      "train loss: 0.45821215775428054\n",
      "train loss: 0.2516563392089442\n",
      "train loss: 0.30648932697284914\n",
      "train loss: 0.22133949118364946\n",
      "train loss: 0.2681286535640977\n",
      "train loss: 0.22130783658835668\n",
      "train loss: 0.1610815366188791\n",
      "train loss: 0.29644401487582905\n",
      "train loss: 0.22480599581495067\n",
      "train loss: 0.3416437062392236\n",
      "train loss: 0.41468958266925376\n",
      "train loss: 0.29544456470589014\n",
      "train loss: 0.2618496274127042\n",
      "train loss: 0.2909271542247031\n",
      "train loss: 0.27259063807368183\n",
      "train loss: 0.2346555023912126\n",
      "train loss: 0.30820829494186114\n",
      "train loss: 0.14493326563581774\n",
      "train loss: 0.26525604491698274\n",
      "train loss: 0.3642661888476244\n",
      "train loss: 0.3642661662325012\n",
      "train loss: 0.2248529980724453\n",
      "train loss: 0.3393400305897347\n",
      "train loss: 0.2552263997219015\n",
      "train loss: 0.2342635302348586\n",
      "train loss: 0.12519994299003406\n",
      "train loss: 0.29267659666628\n",
      "train loss: 0.3868432210477659\n",
      "train loss: 0.14167512408659275\n",
      "train loss: 0.15581036147892563\n",
      "train loss: 0.2697492461169018\n",
      "train loss: 0.2893208855046452\n",
      "train loss: 0.2689484891789417\n",
      "train loss: 0.18557948793195286\n",
      "train loss: 0.25090227853073277\n",
      "train loss: 0.2594076210040491\n",
      "train loss: 0.14928522936462382\n",
      "train loss: 0.36848282728848936\n",
      "train loss: 0.2689537793812079\n",
      "train loss: 0.24562390223399036\n",
      "train loss: 0.2649185220287973\n",
      "train loss: 0.14084314736464829\n",
      "train loss: 0.47380425053792036\n",
      "train loss: 0.25786579706858587\n",
      "train loss: 0.19384937071639746\n",
      "train loss: 0.2094387838227009\n",
      "train loss: 0.19510559647769415\n",
      "train loss: 0.2378410787850111\n",
      "train loss: 0.23770796301480948\n",
      "train loss: 0.18649973248418372\n",
      "train loss: 0.16047890424195177\n",
      "train loss: 0.19522757246076036\n",
      "train loss: 0.30428819236672794\n",
      "train loss: 0.25199889969770695\n",
      "train loss: 0.17659733214596865\n",
      "train loss: 0.40974594967655625\n",
      "train loss: 0.3324651911037212\n",
      "train loss: 0.16818086991451253\n",
      "train loss: 0.1512277915885616\n",
      "train loss: 0.27553964240608786\n",
      "train loss: 0.1750620288103286\n",
      "train loss: 0.35197176623306325\n",
      "train loss: 0.15392094326171935\n",
      "train loss: 0.20437363592495572\n",
      "train loss: 0.23396986366449654\n",
      "train loss: 0.24265241916796587\n",
      "train loss: 0.25902122119257315\n",
      "train loss: 0.2795995901778453\n",
      "train loss: 0.17225234071239304\n",
      "train loss: 0.09590041199572179\n",
      "train loss: 0.19529493745806298\n",
      "train loss: 0.2630643477259501\n",
      "train loss: 0.18846357478837894\n",
      "train loss: 0.15868283418229487\n",
      "train loss: 0.22040652237864236\n",
      "train loss: 0.2682807331868813\n",
      "train loss: 0.2300852543327884\n",
      "train loss: 0.30120771233667964\n",
      "train loss: 0.2425980481047364\n",
      "train loss: 0.2995579231720301\n",
      "train loss: 0.21767340397458257\n",
      "train loss: 0.1591155329847966\n",
      "train loss: 0.14855750846648075\n",
      "train loss: 0.10569158683193218\n",
      "train loss: 0.11684236257396596\n",
      "train loss: 0.1845482480492616\n",
      "=== epoch:  3 , train acc:  0.925 , test acc:  0.923 time:  28.274942636489868 ===\n",
      "train loss: 0.21966597116300154\n",
      "train loss: 0.2152531063516809\n",
      "train loss: 0.1624620127908349\n",
      "train loss: 0.15760120823068316\n",
      "train loss: 0.13188283811479584\n",
      "train loss: 0.31525457469658347\n",
      "train loss: 0.3372000228417448\n",
      "train loss: 0.2236830382515017\n",
      "train loss: 0.17459659018898083\n",
      "train loss: 0.1405849177902256\n",
      "train loss: 0.2735282416414677\n",
      "train loss: 0.22401828900203935\n",
      "train loss: 0.27204577466472046\n",
      "train loss: 0.21620483563027013\n",
      "train loss: 0.25136728747305165\n",
      "train loss: 0.1652799010744617\n",
      "train loss: 0.1669054909804584\n",
      "train loss: 0.0997875065327475\n",
      "train loss: 0.1516181765671751\n",
      "train loss: 0.15830290150923246\n",
      "train loss: 0.1339789537244436\n",
      "train loss: 0.12224385182212467\n",
      "train loss: 0.32532124786483124\n",
      "train loss: 0.18176262726249917\n",
      "train loss: 0.18999676880332153\n",
      "train loss: 0.34276697785202037\n",
      "train loss: 0.21388993057404704\n",
      "train loss: 0.1579610137639513\n",
      "train loss: 0.16501430679072596\n",
      "train loss: 0.16718677473463572\n",
      "train loss: 0.11737207523801527\n",
      "train loss: 0.10119319035758034\n",
      "train loss: 0.14996225694387713\n",
      "train loss: 0.18576952109116596\n",
      "train loss: 0.2050549973671448\n",
      "train loss: 0.37972844973448666\n",
      "train loss: 0.11117620298738563\n",
      "train loss: 0.2376582859471665\n",
      "train loss: 0.14192355499047893\n",
      "train loss: 0.2758388642604436\n",
      "train loss: 0.18949238457986503\n",
      "train loss: 0.1783740294971462\n",
      "train loss: 0.30094521640141836\n",
      "train loss: 0.19184628199437037\n",
      "train loss: 0.25186733599569694\n",
      "train loss: 0.28433795305646875\n",
      "train loss: 0.09826166965017408\n",
      "train loss: 0.19295765456906472\n",
      "train loss: 0.15048636279382685\n",
      "train loss: 0.15022934092783913\n",
      "train loss: 0.3842132113770171\n",
      "train loss: 0.16517780753546224\n",
      "train loss: 0.1384188977643707\n",
      "train loss: 0.12365078286453673\n",
      "train loss: 0.1431435906794951\n",
      "train loss: 0.14073021050359813\n",
      "train loss: 0.24717164523552454\n",
      "train loss: 0.12796095634958551\n",
      "train loss: 0.1464422509001782\n",
      "train loss: 0.12613176736533502\n",
      "train loss: 0.17583306956680203\n",
      "train loss: 0.14758608296856468\n",
      "train loss: 0.18183803829545675\n",
      "train loss: 0.2667520274095692\n",
      "train loss: 0.09082718543252528\n",
      "train loss: 0.21844373326343886\n",
      "train loss: 0.12515168303276697\n",
      "train loss: 0.15468691048187352\n",
      "train loss: 0.1283594513685479\n",
      "train loss: 0.19815551041665125\n",
      "train loss: 0.20124694624178274\n",
      "train loss: 0.1504510897479835\n",
      "train loss: 0.17702054721869684\n",
      "train loss: 0.18956346769749668\n",
      "train loss: 0.18636436500572467\n",
      "train loss: 0.21694196339385463\n",
      "train loss: 0.32308071538428407\n",
      "train loss: 0.20787157251280686\n",
      "train loss: 0.1407092198339015\n",
      "train loss: 0.10865835755693652\n",
      "train loss: 0.11536373175324424\n",
      "train loss: 0.23578769810832118\n",
      "train loss: 0.23168760944110683\n",
      "train loss: 0.1467507564714304\n",
      "train loss: 0.125417090799526\n",
      "train loss: 0.19758168465739728\n",
      "train loss: 0.12776193531434996\n",
      "train loss: 0.06652570458420365\n",
      "train loss: 0.12802323072349628\n",
      "train loss: 0.1263091732592979\n",
      "train loss: 0.21303731403390708\n",
      "train loss: 0.21056777705952842\n",
      "train loss: 0.19876901435113747\n",
      "train loss: 0.2868599617195235\n",
      "train loss: 0.2907845576989023\n",
      "train loss: 0.08287455990678698\n",
      "train loss: 0.21186265461863765\n",
      "train loss: 0.13870920343264528\n",
      "train loss: 0.07075125225739623\n",
      "train loss: 0.054021217729038166\n",
      "train loss: 0.12029963542569287\n",
      "train loss: 0.0831616208590237\n",
      "train loss: 0.1853788046987846\n",
      "train loss: 0.15605575488288956\n",
      "train loss: 0.16132224481125376\n",
      "train loss: 0.20684321916645396\n",
      "train loss: 0.20164398416985096\n",
      "train loss: 0.1298516697504168\n",
      "train loss: 0.04389212046968817\n",
      "train loss: 0.1724217849576527\n",
      "train loss: 0.13362657484845084\n",
      "train loss: 0.10786810121581805\n",
      "train loss: 0.07082554573971266\n",
      "train loss: 0.14780002168381007\n",
      "train loss: 0.08869457476002757\n",
      "train loss: 0.054576349716845246\n",
      "train loss: 0.2048346845002771\n",
      "train loss: 0.1370386393334834\n",
      "train loss: 0.14522627791970458\n",
      "train loss: 0.12449706033252776\n",
      "=== epoch:  4 , train acc:  0.948 , test acc:  0.935 time:  30.28648567199707 ===\n",
      "train loss: 0.2731310743504679\n",
      "train loss: 0.16214536258106815\n",
      "train loss: 0.1520517949647957\n",
      "train loss: 0.1504261962215913\n",
      "train loss: 0.1290857034761719\n",
      "train loss: 0.22540019631153801\n",
      "train loss: 0.19382634578938218\n",
      "train loss: 0.18124084437768295\n",
      "train loss: 0.2008145247636952\n",
      "train loss: 0.20096179653814178\n",
      "train loss: 0.10388420666842353\n",
      "train loss: 0.1312856277085192\n",
      "train loss: 0.276173679487728\n",
      "train loss: 0.10770775741228411\n",
      "train loss: 0.1543553314548855\n",
      "train loss: 0.3018500138121008\n",
      "train loss: 0.06531224184932025\n",
      "train loss: 0.07123455336912671\n",
      "train loss: 0.07264165211371484\n",
      "train loss: 0.14868736757930406\n",
      "train loss: 0.09147843204797218\n",
      "train loss: 0.11133669864441707\n",
      "train loss: 0.18033987400466253\n",
      "train loss: 0.0614631414257059\n",
      "train loss: 0.11924408427370121\n",
      "train loss: 0.08158616938717382\n",
      "train loss: 0.20268122744700395\n",
      "train loss: 0.11051841389845526\n",
      "train loss: 0.12736829079403897\n",
      "train loss: 0.23603353791709533\n",
      "train loss: 0.13584849395568313\n",
      "train loss: 0.1454010498560366\n",
      "train loss: 0.12059281035294869\n",
      "train loss: 0.1654907188602296\n",
      "train loss: 0.13278416144370392\n",
      "train loss: 0.16333512239828876\n",
      "train loss: 0.12229460373353125\n",
      "train loss: 0.1479565003387813\n",
      "train loss: 0.09342764133987248\n",
      "train loss: 0.1752344595292977\n",
      "train loss: 0.08823185028492024\n",
      "train loss: 0.09313990548496695\n",
      "train loss: 0.1096743104923415\n",
      "train loss: 0.10341411212151287\n",
      "train loss: 0.18121325452300818\n",
      "train loss: 0.12400420068862529\n",
      "train loss: 0.1340024738963246\n",
      "train loss: 0.08203750814685355\n",
      "train loss: 0.08846682632263514\n",
      "train loss: 0.14147136505382568\n",
      "train loss: 0.08617013438922154\n",
      "train loss: 0.16694624396460828\n",
      "train loss: 0.186121334257257\n",
      "train loss: 0.10673478195971053\n",
      "train loss: 0.0818811477332045\n",
      "train loss: 0.11671986696920765\n",
      "train loss: 0.11573014095137482\n",
      "train loss: 0.12463586048473113\n",
      "train loss: 0.18923292703878797\n",
      "train loss: 0.17642496320478102\n",
      "train loss: 0.17743928981263274\n",
      "train loss: 0.08530934755312959\n",
      "train loss: 0.19416287508081748\n",
      "train loss: 0.17426149297754773\n",
      "train loss: 0.10827970508117772\n",
      "train loss: 0.21703381604868047\n",
      "train loss: 0.06387135849769521\n",
      "train loss: 0.10549569542118688\n",
      "train loss: 0.18267813342477568\n",
      "train loss: 0.16435886014546106\n",
      "train loss: 0.08458829907373268\n",
      "train loss: 0.0820089113242441\n",
      "train loss: 0.1757255390901732\n",
      "train loss: 0.11163315960165741\n",
      "train loss: 0.12832688413135307\n",
      "train loss: 0.23487395812779596\n",
      "train loss: 0.07726425239650628\n",
      "train loss: 0.24724700696869042\n",
      "train loss: 0.0776347683918372\n",
      "train loss: 0.1415141186542059\n",
      "train loss: 0.06068763931568328\n",
      "train loss: 0.0841765471962188\n",
      "train loss: 0.11910540997062231\n",
      "train loss: 0.1509950134670361\n",
      "train loss: 0.15407964577897748\n",
      "train loss: 0.2026782415549644\n",
      "train loss: 0.13650517675994697\n",
      "train loss: 0.1235348356902074\n",
      "train loss: 0.13385822714788026\n",
      "train loss: 0.047930468702508854\n",
      "train loss: 0.08700662212795049\n",
      "train loss: 0.1287067181194584\n",
      "train loss: 0.1359648418512983\n",
      "train loss: 0.0837198115231069\n",
      "train loss: 0.11927735715445588\n",
      "train loss: 0.14501488297842122\n",
      "train loss: 0.18258179736224986\n",
      "train loss: 0.11011830423258122\n",
      "train loss: 0.15802978410484272\n",
      "train loss: 0.06326579276931318\n",
      "train loss: 0.1310280630583078\n",
      "train loss: 0.14263897717686455\n",
      "train loss: 0.14120785937929953\n",
      "train loss: 0.06109309612927245\n",
      "train loss: 0.11274419726853355\n",
      "train loss: 0.05157127625853098\n",
      "train loss: 0.12594444086695888\n",
      "train loss: 0.09364929346178597\n",
      "train loss: 0.11106385162827043\n",
      "train loss: 0.11811971156398046\n",
      "train loss: 0.10457233517132045\n",
      "train loss: 0.15823474180510552\n",
      "train loss: 0.08470676012690853\n",
      "train loss: 0.13688716122681807\n",
      "train loss: 0.08870947073975283\n",
      "train loss: 0.07892831818165003\n",
      "train loss: 0.15730818432810129\n",
      "train loss: 0.12176622091622334\n",
      "train loss: 0.06421014910138395\n",
      "train loss: 0.07673683311281851\n",
      "=== epoch:  5 , train acc:  0.957 , test acc:  0.951 time:  31.83357000350952 ===\n",
      "train loss: 0.07707275475555729\n",
      "train loss: 0.22215299948466063\n",
      "train loss: 0.06743185392624527\n",
      "train loss: 0.1017956917669426\n",
      "train loss: 0.10342556807143467\n",
      "train loss: 0.09109436208070719\n",
      "train loss: 0.08928787443759387\n",
      "train loss: 0.18015823393714647\n",
      "train loss: 0.10665221019037872\n",
      "train loss: 0.06636484567314654\n",
      "train loss: 0.2863986997790679\n",
      "train loss: 0.057514184524589146\n",
      "train loss: 0.07497035247233193\n",
      "train loss: 0.08840102623403831\n",
      "train loss: 0.08743448316790378\n",
      "train loss: 0.1395253574003407\n",
      "train loss: 0.11308108640317671\n",
      "train loss: 0.12387375219057009\n",
      "train loss: 0.16145007625731506\n",
      "train loss: 0.11233710962466915\n",
      "train loss: 0.17356079675474478\n",
      "train loss: 0.20480804508770795\n",
      "train loss: 0.09605585317652197\n",
      "train loss: 0.07380428468344052\n",
      "train loss: 0.12872473731955641\n",
      "train loss: 0.09142016387023956\n",
      "train loss: 0.07002119612076042\n",
      "train loss: 0.13567513704510767\n",
      "train loss: 0.12746809808733275\n",
      "train loss: 0.11996023460636998\n",
      "train loss: 0.12370931381050575\n",
      "train loss: 0.06728238021157268\n",
      "train loss: 0.14482182901354343\n",
      "train loss: 0.042589805796048895\n",
      "train loss: 0.08826173530711387\n",
      "train loss: 0.1717738472190314\n",
      "train loss: 0.11791858830304594\n",
      "train loss: 0.12494105832000266\n",
      "train loss: 0.14622902218390474\n",
      "train loss: 0.18809880814122107\n",
      "train loss: 0.07332436888166773\n",
      "train loss: 0.08237693395837065\n",
      "train loss: 0.1315511784722956\n",
      "train loss: 0.10396015853917888\n",
      "train loss: 0.12412084066996383\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m                         Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[1], line 28\u001B[0m\n\u001B[0;32m     22\u001B[0m trainer \u001B[38;5;241m=\u001B[39m Trainer(network, x_train, t_train, x_test, t_test,\n\u001B[0;32m     23\u001B[0m                   epochs\u001B[38;5;241m=\u001B[39mmax_epochs, mini_batch_size\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m100\u001B[39m,\n\u001B[0;32m     24\u001B[0m                   optimizer\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mAdam\u001B[39m\u001B[38;5;124m'\u001B[39m, optimizer_param\u001B[38;5;241m=\u001B[39m{\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mlr\u001B[39m\u001B[38;5;124m'\u001B[39m: \u001B[38;5;241m0.001\u001B[39m},\n\u001B[0;32m     25\u001B[0m                   evaluate_sample_num_per_epoch\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1000\u001B[39m)\n\u001B[0;32m     27\u001B[0m \u001B[38;5;66;03m# 开始训练\u001B[39;00m\n\u001B[1;32m---> 28\u001B[0m \u001B[43mtrainer\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtrain\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     29\u001B[0m \u001B[38;5;66;03m# 保存训练结果\u001B[39;00m\n\u001B[0;32m     30\u001B[0m network\u001B[38;5;241m.\u001B[39msave_params(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mparams.pkl\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n",
      "File \u001B[1;32m~\\Desktop\\DEV\\study\\Python\\DeepLearningIntroduction\\common\\trainer.py:91\u001B[0m, in \u001B[0;36mTrainer.train\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m     89\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mtrain\u001B[39m(\u001B[38;5;28mself\u001B[39m):\n\u001B[0;32m     90\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m i \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mmax_iter):\n\u001B[1;32m---> 91\u001B[0m         \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mtrain_step\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     93\u001B[0m     test_acc \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mnetwork\u001B[38;5;241m.\u001B[39maccuracy(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mx_test, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mt_test)\n\u001B[0;32m     95\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mverbose:\n",
      "File \u001B[1;32m~\\Desktop\\DEV\\study\\Python\\DeepLearningIntroduction\\common\\trainer.py:59\u001B[0m, in \u001B[0;36mTrainer.train_step\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m     56\u001B[0m t_batch \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mt_train[batch_mask]\n\u001B[0;32m     58\u001B[0m \u001B[38;5;66;03m# 获取梯度，更新权重\u001B[39;00m\n\u001B[1;32m---> 59\u001B[0m grads \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mnetwork\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mgradient\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx_batch\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mt_batch\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     60\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39moptimizer\u001B[38;5;241m.\u001B[39mupdate(\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mnetwork\u001B[38;5;241m.\u001B[39mparams, grads)\n\u001B[0;32m     62\u001B[0m \u001B[38;5;66;03m# 记录损失函数值\u001B[39;00m\n",
      "File \u001B[1;32m~\\Desktop\\DEV\\study\\Python\\DeepLearningIntroduction\\common\\simple_conv_net.py:64\u001B[0m, in \u001B[0;36mSimpleConvNet.gradient\u001B[1;34m(self, x, t)\u001B[0m\n\u001B[0;32m     62\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mgradient\u001B[39m(\u001B[38;5;28mself\u001B[39m, x, t):\n\u001B[0;32m     63\u001B[0m     \u001B[38;5;66;03m# forward\u001B[39;00m\n\u001B[1;32m---> 64\u001B[0m     \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mloss\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mt\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     66\u001B[0m     \u001B[38;5;66;03m# backward\u001B[39;00m\n\u001B[0;32m     67\u001B[0m     dout \u001B[38;5;241m=\u001B[39m \u001B[38;5;241m1\u001B[39m\n",
      "File \u001B[1;32m~\\Desktop\\DEV\\study\\Python\\DeepLearningIntroduction\\common\\simple_conv_net.py:47\u001B[0m, in \u001B[0;36mSimpleConvNet.loss\u001B[1;34m(self, x, t)\u001B[0m\n\u001B[0;32m     46\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mloss\u001B[39m(\u001B[38;5;28mself\u001B[39m, x, t):\n\u001B[1;32m---> 47\u001B[0m     y \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mpredict\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     48\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mlast_layer\u001B[38;5;241m.\u001B[39mforward(y, t)\n",
      "File \u001B[1;32m~\\Desktop\\DEV\\study\\Python\\DeepLearningIntroduction\\common\\simple_conv_net.py:42\u001B[0m, in \u001B[0;36mSimpleConvNet.predict\u001B[1;34m(self, x)\u001B[0m\n\u001B[0;32m     40\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mpredict\u001B[39m(\u001B[38;5;28mself\u001B[39m, x):\n\u001B[0;32m     41\u001B[0m     \u001B[38;5;28;01mfor\u001B[39;00m layer \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mlayers\u001B[38;5;241m.\u001B[39mvalues():\n\u001B[1;32m---> 42\u001B[0m         x \u001B[38;5;241m=\u001B[39m \u001B[43mlayer\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mforward\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     44\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m x\n",
      "File \u001B[1;32m~\\Desktop\\DEV\\study\\Python\\DeepLearningIntroduction\\common\\layers.py:259\u001B[0m, in \u001B[0;36mPooling.forward\u001B[1;34m(self, x)\u001B[0m\n\u001B[0;32m    257\u001B[0m \u001B[38;5;66;03m# 最大值\u001B[39;00m\n\u001B[0;32m    258\u001B[0m arg_max \u001B[38;5;241m=\u001B[39m np\u001B[38;5;241m.\u001B[39margmax(col, axis\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1\u001B[39m)\n\u001B[1;32m--> 259\u001B[0m out \u001B[38;5;241m=\u001B[39m \u001B[43mnp\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmax\u001B[49m\u001B[43m(\u001B[49m\u001B[43mcol\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43maxis\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;241;43m1\u001B[39;49m\u001B[43m)\u001B[49m\n\u001B[0;32m    260\u001B[0m out \u001B[38;5;241m=\u001B[39m out\u001B[38;5;241m.\u001B[39mreshape(N, out_h, out_w, C)\u001B[38;5;241m.\u001B[39mtranspose(\u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m3\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m2\u001B[39m)\n\u001B[0;32m    262\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mx \u001B[38;5;241m=\u001B[39m x\n",
      "File \u001B[1;32mD:\\dev\\miniconda3\\envs\\DeepLearningIntroduction\\lib\\site-packages\\numpy\\core\\fromnumeric.py:2810\u001B[0m, in \u001B[0;36mmax\u001B[1;34m(a, axis, out, keepdims, initial, where)\u001B[0m\n\u001B[0;32m   2692\u001B[0m \u001B[38;5;129m@array_function_dispatch\u001B[39m(_max_dispatcher)\n\u001B[0;32m   2693\u001B[0m \u001B[38;5;129m@set_module\u001B[39m(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mnumpy\u001B[39m\u001B[38;5;124m'\u001B[39m)\n\u001B[0;32m   2694\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mmax\u001B[39m(a, axis\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m, out\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m, keepdims\u001B[38;5;241m=\u001B[39mnp\u001B[38;5;241m.\u001B[39m_NoValue, initial\u001B[38;5;241m=\u001B[39mnp\u001B[38;5;241m.\u001B[39m_NoValue,\n\u001B[0;32m   2695\u001B[0m          where\u001B[38;5;241m=\u001B[39mnp\u001B[38;5;241m.\u001B[39m_NoValue):\n\u001B[0;32m   2696\u001B[0m \u001B[38;5;250m    \u001B[39m\u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m   2697\u001B[0m \u001B[38;5;124;03m    Return the maximum of an array or maximum along an axis.\u001B[39;00m\n\u001B[0;32m   2698\u001B[0m \n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   2808\u001B[0m \u001B[38;5;124;03m    5\u001B[39;00m\n\u001B[0;32m   2809\u001B[0m \u001B[38;5;124;03m    \"\"\"\u001B[39;00m\n\u001B[1;32m-> 2810\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43m_wrapreduction\u001B[49m\u001B[43m(\u001B[49m\u001B[43ma\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mnp\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmaximum\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[38;5;124;43mmax\u001B[39;49m\u001B[38;5;124;43m'\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43maxis\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\u001B[43m 
\u001B[49m\u001B[43mout\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m   2811\u001B[0m \u001B[43m                          \u001B[49m\u001B[43mkeepdims\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mkeepdims\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43minitial\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43minitial\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mwhere\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43mwhere\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mD:\\dev\\miniconda3\\envs\\DeepLearningIntroduction\\lib\\site-packages\\numpy\\core\\fromnumeric.py:88\u001B[0m, in \u001B[0;36m_wrapreduction\u001B[1;34m(obj, ufunc, method, axis, dtype, out, **kwargs)\u001B[0m\n\u001B[0;32m     85\u001B[0m         \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m     86\u001B[0m             \u001B[38;5;28;01mreturn\u001B[39;00m reduction(axis\u001B[38;5;241m=\u001B[39maxis, out\u001B[38;5;241m=\u001B[39mout, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mpasskwargs)\n\u001B[1;32m---> 88\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m ufunc\u001B[38;5;241m.\u001B[39mreduce(obj, axis, dtype, out, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mpasskwargs)\n",
      "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
     ]
    }
   ],
   "source": [
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "from dataset.mnist import load_mnist\n",
    "from common.optimizer import *\n",
    "from common.util import smooth_curve\n",
    "from common.simple_conv_net import SimpleConvNet\n",
    "from common.trainer import Trainer\n",
    "\n",
    "# 0.读入MNIST数据\n",
    "(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)\n",
    "\n",
    "# 减少学习数据\n",
    "x_train = x_train[:12000]\n",
    "t_train = t_train[:12000]\n",
    "\n",
    "# 设置是否使用Dropout及比例\n",
    "max_epochs = 20\n",
    "\n",
    "network = SimpleConvNet(input_dim=(1, 28, 28),\n",
    "                        conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},\n",
    "                        hidden_size=100, output_size=10, weight_init_std=0.01)\n",
    "trainer = Trainer(network, x_train, t_train, x_test, t_test,\n",
    "                  epochs=max_epochs, mini_batch_size=100,\n",
    "                  optimizer='Adam', optimizer_param={'lr': 0.001},\n",
    "                  evaluate_sample_num_per_epoch=1000)\n",
    "\n",
    "# 开始训练\n",
    "trainer.train()\n",
    "# 保存训练结果\n",
    "network.save_params(\"params.pkl\")\n",
    "print(\"Saved Network Parameters!\")\n",
    "\n",
    "# 绘制图像\n",
    "markers = {\"train\": \"o\", \"test\": \"s\"}\n",
    "x = np.arange(max_epochs)\n",
    "plt.plot(x, smooth_curve(trainer.train_acc_list), marker='o', label='train', markevery=2)\n",
    "plt.plot(x, smooth_curve(trainer.test_acc_list), marker='s', label='test', markevery=2)\n",
    "plt.xlabel(\"epochs\")\n",
    "plt.ylabel(\"accuracy\")\n",
    "plt.ylim(0, 1.0)\n",
    "plt.legend(loc='lower right')\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}