{"cells":[{"outputs":[],"execution_count":null,"source":"# 查看当前挂载的数据集目录\n!ls /home/kesci/input/","cell_type":"code","metadata":{"trusted":true,"collapsed":false,"id":"56C726F3A04245B986BFD25BF2717F08"}},{"outputs":[],"execution_count":null,"source":"# 查看个人持久化工作区文件\n!ls /home/kesci/work/","cell_type":"code","metadata":{"trusted":true,"collapsed":false,"id":"E4E9A6B60A214F95A5C87807D8F37AD8"}},{"outputs":[],"execution_count":null,"source":"# 查看当前kernerl下的package\n!pip list --format=columns","cell_type":"code","metadata":{"trusted":true,"collapsed":false,"id":"E8B36F770C8149CF8E36F8C91394F023"}},{"outputs":[{"output_type":"stream","text":"开始加载数据\n加载数据完成\ntrain starting\n","name":"stdout"},{"output_type":"stream","text":"/opt/conda/lib/python3.6/site-packages/smart_open/smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function\n  'See the migration notes for details: %s' % _MIGRATION_NOTES_URL\n","name":"stderr"},{"output_type":"stream","text":"WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/util/tf_should_use.py:189: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\nInstructions for updating:\nUse `tf.global_variables_initializer` instead.\nstep 10,loss:1.0462\nstep 20,loss:0.632172\nstep 30,loss:0.525473\nstep 40,loss:0.471719\nstep 50,loss:0.527251\nstep 60,loss:0.454237\nstep 70,loss:0.48034\nstep 80,loss:0.446083\nstep 90,loss:0.444642\nstep 100,loss:0.449053\nstep 110,loss:0.511843\nstep 120,loss:0.479241\nstep 130,loss:0.473502\nstep 140,loss:0.465574\nstep 150,loss:0.460672\nstep 160,loss:0.497939\nstep 170,loss:0.448107\nstep 180,loss:0.492322\nstep 190,loss:0.503006\nstep 200,loss:0.449122\nstep 210,loss:0.473196\nstep 220,loss:0.492591\nstep 230,loss:0.460479\nstep 
240,loss:0.485445\nstep 250,loss:0.453205\nstep 260,loss:0.460617\nstep 270,loss:0.453467\nstep 280,loss:0.430042\nstep 290,loss:0.460539\nstep 300,loss:0.501141\nstep 310,loss:0.459243\nstep 320,loss:0.454452\nstep 330,loss:0.45144\nstep 340,loss:0.455306\nstep 350,loss:0.475255\nstep 360,loss:0.449272\nstep 370,loss:0.485346\nstep 380,loss:0.45997\nstep 390,loss:0.501605\nstep 400,loss:0.449306\nstep 410,loss:0.443277\nstep 420,loss:0.491823\nstep 430,loss:0.500032\nstep 440,loss:0.500549\nstep 450,loss:0.420891\nstep 460,loss:0.443918\nstep 470,loss:0.45322\nstep 480,loss:0.463207\nstep 490,loss:0.451554\nstep 500,loss:0.480983\n距离训练完一整轮剩余时间： 298.3222647193819  分钟\nper_train_loss_avg: 0.545631054341793 **********\nvaild_loss: 0.46763456\nstep 510,loss:0.449159\nstep 520,loss:0.451904\nstep 530,loss:0.483178\nstep 540,loss:0.466064\nstep 550,loss:0.48336\nstep 560,loss:0.474377\nstep 570,loss:0.468772\nstep 580,loss:0.443056\nstep 590,loss:0.458025\nstep 600,loss:0.43451\nstep 610,loss:0.457558\nstep 620,loss:0.477564\nstep 630,loss:0.47492\nstep 640,loss:0.479842\nstep 650,loss:0.483709\nstep 660,loss:0.46838\nstep 670,loss:0.487329\nstep 680,loss:0.500574\nstep 690,loss:0.475984\nstep 700,loss:0.466089\nstep 710,loss:0.494755\nstep 720,loss:0.481445\nstep 730,loss:0.476662\nstep 740,loss:0.446048\nstep 750,loss:0.434625\nstep 760,loss:0.480203\nstep 770,loss:0.466548\nstep 780,loss:0.473155\nstep 790,loss:0.489826\nstep 800,loss:0.496994\nstep 810,loss:0.499787\nstep 820,loss:0.450682\nstep 830,loss:0.47612\nstep 840,loss:0.457849\nstep 850,loss:0.469791\nstep 860,loss:0.458981\nstep 870,loss:0.43708\nstep 880,loss:0.479689\nstep 890,loss:0.444942\nstep 900,loss:0.491437\nstep 910,loss:0.469469\nstep 920,loss:0.472481\nstep 930,loss:0.454053\nstep 940,loss:0.473919\nstep 950,loss:0.477461\nstep 960,loss:0.468811\nstep 970,loss:0.472258\nstep 980,loss:0.465559\nstep 990,loss:0.468678\nstep 1000,loss:0.473804\n距离训练完一整轮剩余时间： 291.7233983298143  
分钟\nper_train_loss_avg: 0.4679115279912949 **********\nvaild_loss: 0.461225\nstep 1010,loss:0.482076\nstep 1020,loss:0.472809\nstep 1030,loss:0.483125\nstep 1040,loss:0.477855\nstep 1050,loss:0.471202\nstep 1060,loss:0.482754\nstep 1070,loss:0.463764\nstep 1080,loss:0.477233\nstep 1090,loss:0.491877\nstep 1100,loss:0.490877\nstep 1110,loss:0.440302\nstep 1120,loss:0.449863\nstep 1130,loss:0.461182\nstep 1140,loss:0.44805\nstep 1150,loss:0.428836\nstep 1160,loss:0.45215\nstep 1170,loss:0.455831\nstep 1180,loss:0.467805\nstep 1190,loss:0.461631\nstep 1200,loss:0.455271\nstep 1210,loss:0.443502\nstep 1220,loss:0.474108\nstep 1230,loss:0.490908\nstep 1240,loss:0.480535\nstep 1250,loss:0.481272\nstep 1260,loss:0.498635\nstep 1270,loss:0.415926\nstep 1280,loss:0.476324\nstep 1290,loss:0.518369\nstep 1300,loss:0.47368\nstep 1310,loss:0.473865\nstep 1320,loss:0.434562\nstep 1330,loss:0.443316\nstep 1340,loss:0.447517\nstep 1350,loss:0.453581\nstep 1360,loss:0.465926\nstep 1370,loss:0.475184\nstep 1380,loss:0.483821\nstep 1390,loss:0.434673\nstep 1400,loss:0.494207\nstep 1410,loss:0.487284\nstep 1420,loss:0.442568\nstep 1430,loss:0.477398\nstep 1440,loss:0.470191\nstep 1450,loss:0.478844\nstep 1460,loss:0.465134\nstep 1470,loss:0.444825\nstep 1480,loss:0.438524\nstep 1490,loss:0.479503\nstep 1500,loss:0.454953\n距离训练完一整轮剩余时间： 287.17513196778793  分钟\nper_train_loss_avg: 0.4655977815389633 **********\nvaild_loss: 0.47027734\nstep 1510,loss:0.459542\nstep 1520,loss:0.47838\nstep 1530,loss:0.471185\nstep 1540,loss:0.50389\nstep 1550,loss:0.465012\nstep 1560,loss:0.469388\nstep 1570,loss:0.457383\nstep 1580,loss:0.493468\nstep 1590,loss:0.447117\nstep 1600,loss:0.462037\nstep 1610,loss:0.462857\nstep 1620,loss:0.486584\nstep 1630,loss:0.465577\nstep 1640,loss:0.466661\nstep 1650,loss:0.460452\nstep 1660,loss:0.468801\nstep 1670,loss:0.467281\nstep 1680,loss:0.488225\nstep 1690,loss:0.460573\nstep 1700,loss:0.443147\nstep 1710,loss:0.457626\nstep 1720,loss:0.477327\nstep 
1730,loss:0.461293\nstep 1740,loss:0.464344\nstep 1750,loss:0.491136\nstep 1760,loss:0.475586\nstep 1770,loss:0.466897\nstep 1780,loss:0.457118\nstep 1790,loss:0.460472\nstep 1800,loss:0.486117\nstep 1810,loss:0.489318\nstep 1820,loss:0.480756\nstep 1830,loss:0.467523\nstep 1840,loss:0.467831\nstep 1850,loss:0.470152\nstep 1860,loss:0.437883\nstep 1870,loss:0.444967\nstep 1880,loss:0.4717\nstep 1890,loss:0.481643\nstep 1900,loss:0.446751\nstep 1910,loss:0.435161\nstep 1920,loss:0.433112\nstep 1930,loss:0.489714\nstep 1940,loss:0.444553\nstep 1950,loss:0.477026\nstep 1960,loss:0.474849\nstep 1970,loss:0.440242\nstep 1980,loss:0.46464\nstep 1990,loss:0.460732\nstep 2000,loss:0.478656\n距离训练完一整轮剩余时间： 285.8765370592475  分钟\nper_train_loss_avg: 0.464664092361927 **********\nvaild_loss: 0.4795457\nstep 2010,loss:0.48091\nstep 2020,loss:0.443762\nstep 2030,loss:0.469561\nstep 2040,loss:0.437011\nstep 2050,loss:0.459225\nstep 2060,loss:0.463787\nstep 2070,loss:0.452731\nstep 2080,loss:0.441038\nstep 2090,loss:0.478029\nstep 2100,loss:0.477422\nstep 2110,loss:0.450814\nstep 2120,loss:0.467631\nstep 2130,loss:0.503024\nstep 2140,loss:0.46624\nstep 2150,loss:0.485641\nstep 2160,loss:0.459333\nstep 2170,loss:0.46341\nstep 2180,loss:0.476781\nstep 2190,loss:0.490864\nstep 2200,loss:0.451481\nstep 2210,loss:0.487256\nstep 2220,loss:0.461936\nstep 2230,loss:0.443516\nstep 2240,loss:0.453881\nstep 2250,loss:0.446183\nstep 2260,loss:0.476235\nstep 2270,loss:0.465375\nstep 2280,loss:0.46881\nstep 2290,loss:0.439111\nstep 2300,loss:0.440681\nstep 2310,loss:0.469634\nstep 2320,loss:0.488782\nstep 2330,loss:0.467792\nstep 2340,loss:0.424544\nstep 2350,loss:0.465932\nstep 2360,loss:0.460472\nstep 2370,loss:0.466368\nstep 2380,loss:0.477074\nstep 2390,loss:0.496645\nstep 2400,loss:0.434771\nstep 2410,loss:0.489824\nstep 2420,loss:0.470532\nstep 2430,loss:0.430826\nstep 2440,loss:0.482923\nstep 2450,loss:0.47152\nstep 2460,loss:0.477361\nstep 2470,loss:0.487741\nstep 2480,loss:0.4578\nstep 
2490,loss:0.447429\nstep 2500,loss:0.508131\n距离训练完一整轮剩余时间： 281.9772245188554  分钟\nper_train_loss_avg: 0.46459157109260557 **********\nvaild_loss: 0.4590923\nstep 2510,loss:0.457102\nstep 2520,loss:0.465939\nstep 2530,loss:0.451529\nstep 2540,loss:0.447303\nstep 2550,loss:0.461981\nstep 2560,loss:0.448781\nstep 2570,loss:0.462746\nstep 2580,loss:0.47187\nstep 2590,loss:0.461204\nstep 2600,loss:0.455641\nstep 2610,loss:0.455966\nstep 2620,loss:0.459015\nstep 2630,loss:0.464474\nstep 2640,loss:0.438476\nstep 2650,loss:0.439443\nstep 2660,loss:0.495874\nstep 2670,loss:0.495484\nstep 2680,loss:0.467368\nstep 2690,loss:0.456973\nstep 2700,loss:0.468721\nstep 2710,loss:0.464265\nstep 2720,loss:0.449181\nstep 2730,loss:0.453194\nstep 2740,loss:0.469063\nstep 2750,loss:0.4526\nstep 2760,loss:0.497935\nstep 2770,loss:0.45956\nstep 2780,loss:0.470387\nstep 2790,loss:0.476838\nstep 2800,loss:0.443699\nstep 2810,loss:0.455755\nstep 2820,loss:0.468812\nstep 2830,loss:0.448877\nstep 2840,loss:0.44784\nstep 2850,loss:0.477545\nstep 2860,loss:0.418355\nstep 2870,loss:0.44809\nstep 2880,loss:0.453796\nstep 2890,loss:0.494042\nstep 2900,loss:0.46654\nstep 2910,loss:0.456712\nstep 2920,loss:0.473807\nstep 2930,loss:0.437621\nstep 2940,loss:0.430327\nstep 2950,loss:0.479769\nstep 2960,loss:0.444696\nstep 2970,loss:0.456996\nstep 2980,loss:0.461133\nstep 2990,loss:0.474367\nstep 3000,loss:0.443566\n距离训练完一整轮剩余时间： 278.1774816226214  分钟\nper_train_loss_avg: 0.46373680877685547 **********\nvaild_loss: 0.44988027\nstep 3010,loss:0.461194\nstep 3020,loss:0.419143\nstep 3030,loss:0.452012\nstep 3040,loss:0.461975\nstep 3050,loss:0.471946\nstep 3060,loss:0.478672\nstep 3070,loss:0.474787\nstep 3080,loss:0.465289\nstep 3090,loss:0.46602\nstep 3100,loss:0.472033\nstep 3110,loss:0.489585\nstep 3120,loss:0.444172\nstep 3130,loss:0.454829\nstep 3140,loss:0.441826\nstep 3150,loss:0.460537\nstep 3160,loss:0.458155\nstep 3170,loss:0.451012\nstep 3180,loss:0.469832\nstep 3190,loss:0.457175\nstep 
3200,loss:0.445742\nstep 3210,loss:0.453431\nstep 3220,loss:0.464014\nstep 3230,loss:0.446652\nstep 3240,loss:0.461286\nstep 3250,loss:0.459926\nstep 3260,loss:0.471321\nstep 3270,loss:0.468371\nstep 3280,loss:0.474318\nstep 3290,loss:0.470245\nstep 3300,loss:0.448281\nstep 3310,loss:0.46924\nstep 3320,loss:0.478355\nstep 3330,loss:0.448296\nstep 3340,loss:0.457724\nstep 3350,loss:0.440876\nstep 3360,loss:0.455214\nstep 3370,loss:0.493059\nstep 3380,loss:0.45374\nstep 3390,loss:0.441799\nstep 3400,loss:0.482042\nstep 3410,loss:0.480895\nstep 3420,loss:0.454239\nstep 3430,loss:0.44437\nstep 3440,loss:0.465893\nstep 3450,loss:0.463336\nstep 3460,loss:0.46356\nstep 3470,loss:0.470532\nstep 3480,loss:0.489992\nstep 3490,loss:0.461085\nstep 3500,loss:0.454153\n距离训练完一整轮剩余时间： 276.65258874185383  分钟\nper_train_loss_avg: 0.46247549772262575 **********\nvaild_loss: 0.4611932\nstep 3510,loss:0.453985\nstep 3520,loss:0.456887\nstep 3530,loss:0.426487\nstep 3540,loss:0.459008\nstep 3550,loss:0.453174\nstep 3560,loss:0.478383\nstep 3570,loss:0.465161\nstep 3580,loss:0.465733\nstep 3590,loss:0.4312\nstep 3600,loss:0.451727\nstep 3610,loss:0.457816\nstep 3620,loss:0.486691\nstep 3630,loss:0.489682\nstep 3640,loss:0.47523\nstep 3650,loss:0.451092\nstep 3660,loss:0.448854\nstep 3670,loss:0.4609\nstep 3680,loss:0.447762\nstep 3690,loss:0.472521\nstep 3700,loss:0.45075\nstep 3710,loss:0.456556\nstep 3720,loss:0.467577\nstep 3730,loss:0.463573\nstep 3740,loss:0.433603\nstep 3750,loss:0.464256\nstep 3760,loss:0.460861\nstep 3770,loss:0.489804\nstep 3780,loss:0.503767\nstep 3790,loss:0.465662\nstep 3800,loss:0.513171\nstep 3810,loss:0.477289\nstep 3820,loss:0.465438\nstep 3830,loss:0.460423\nstep 3840,loss:0.470054\nstep 3850,loss:0.436036\nstep 3860,loss:0.489195\nstep 3870,loss:0.469857\nstep 3880,loss:0.47327\nstep 3890,loss:0.468505\nstep 3900,loss:0.460742\nstep 3910,loss:0.498922\nstep 3920,loss:0.477247\nstep 3930,loss:0.483162\nstep 3940,loss:0.488627\nstep 
3950,loss:0.48386\nstep 3960,loss:0.462055\nstep 3970,loss:0.456583\nstep 3980,loss:0.469288\nstep 3990,loss:0.461571\nstep 4000,loss:0.421302\n距离训练完一整轮剩余时间： 272.0365791839858  分钟\nper_train_loss_avg: 0.46669209641218184 **********\nvaild_loss: 0.46958053\nstep 4010,loss:0.459679\nstep 4020,loss:0.478513\nstep 4030,loss:0.45029\nstep 4040,loss:0.4824\nstep 4050,loss:0.470657\nstep 4060,loss:0.435792\nstep 4070,loss:0.480442\nstep 4080,loss:0.47409\nstep 4090,loss:0.462439\nstep 4100,loss:0.427819\nstep 4110,loss:0.497692\nstep 4120,loss:0.444085\nstep 4130,loss:0.453573\nstep 4140,loss:0.457706\nstep 4150,loss:0.46291\nstep 4160,loss:0.448489\nstep 4170,loss:0.446213\nstep 4180,loss:0.486591\nstep 4190,loss:0.458997\nstep 4200,loss:0.466148\nstep 4210,loss:0.459793\nstep 4220,loss:0.46849\nstep 4230,loss:0.459375\nstep 4240,loss:0.451954\nstep 4250,loss:0.448467\nstep 4260,loss:0.494582\nstep 4270,loss:0.463723\nstep 4280,loss:0.477505\nstep 4290,loss:0.469794\nstep 4300,loss:0.523975\nstep 4310,loss:0.46472\nstep 4320,loss:0.462866\nstep 4330,loss:0.443547\nstep 4340,loss:0.442839\nstep 4350,loss:0.481076\nstep 4360,loss:0.448745\nstep 4370,loss:0.469105\nstep 4380,loss:0.485431\nstep 4390,loss:0.485365\nstep 4400,loss:0.463638\nstep 4410,loss:0.470491\nstep 4420,loss:0.415986\nstep 4430,loss:0.417506\nstep 4440,loss:0.466226\nstep 4450,loss:0.478914\nstep 4460,loss:0.463682\nstep 4470,loss:0.427559\nstep 4480,loss:0.436833\nstep 4490,loss:0.456727\nstep 4500,loss:0.466219\n距离训练完一整轮剩余时间： 270.253361511603  分钟\nper_train_loss_avg: 0.4632134038805962 **********\nvaild_loss: 0.4489779\nstep 4510,loss:0.463341\nstep 4520,loss:0.49604\nstep 4530,loss:0.440236\nstep 4540,loss:0.460087\nstep 4550,loss:0.437721\nstep 4560,loss:0.459056\nstep 4570,loss:0.454703\nstep 4580,loss:0.465417\nstep 4590,loss:0.462836\nstep 4600,loss:0.44631\nstep 4610,loss:0.476796\nstep 4620,loss:0.479689\nstep 4630,loss:0.424209\nstep 4640,loss:0.45757\nstep 4650,loss:0.462773\nstep 
4660,loss:0.486268\nstep 4670,loss:0.439349\nstep 4680,loss:0.442644\nstep 4690,loss:0.489258\nstep 4700,loss:0.477247\nstep 4710,loss:0.428271\nstep 4720,loss:0.454851\nstep 4730,loss:0.438002\nstep 4740,loss:0.475157\nstep 4750,loss:0.467378\nstep 4760,loss:0.468996\nstep 4770,loss:0.43303\nstep 4780,loss:0.46144\nstep 4790,loss:0.492412\nstep 4800,loss:0.472949\nstep 4810,loss:0.484041\nstep 4820,loss:0.481326\nstep 4830,loss:0.429192\nstep 4840,loss:0.461399\nstep 4850,loss:0.482202\nstep 4860,loss:0.477213\nstep 4870,loss:0.485022\nstep 4880,loss:0.486321\nstep 4890,loss:0.455751\nstep 4900,loss:0.466861\nstep 4910,loss:0.452087\nstep 4920,loss:0.465243\nstep 4930,loss:0.450198\nstep 4940,loss:0.451267\nstep 4950,loss:0.469515\nstep 4960,loss:0.481942\nstep 4970,loss:0.430867\nstep 4980,loss:0.464941\nstep 4990,loss:0.439285\nstep 5000,loss:0.484011\n距离训练完一整轮剩余时间： 265.8355125673115  分钟\nper_train_loss_avg: 0.462044109582901 **********\nvaild_loss: 0.45988926\nper_vaild_loss_avg: 0.4627296060323715 ************\n","name":"stdout"},{"output_type":"error","ename":"ValueError","evalue":"Parent directory of /home/kesci/work/nn/tf_model_text_cnn_diin/rnn doesn't exist, can't save.","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mNotFoundError\u001b[0m                             Traceback (most recent call last)","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m   1333\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m       \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1335\u001b[0m     \u001b[0;32mexcept\u001b[0m 
\u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m   1318\u001b[0m       return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m           options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m   1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m   1406\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m         run_metadata)\n\u001b[0m\u001b[1;32m   1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mNotFoundError\u001b[0m: /home/kesci/work/nn/tf_model_text_cnn_diin; No such file or directory\n\t [[{{node save_1/SaveV2}} = SaveV2[dtypes=[DT_INT32, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, ..., DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](_arg_save_1/Const_0_0, save_1/SaveV2/tensor_names, save_1/SaveV2/shape_and_slices, Global_Step, beta1_power/_49, beta2_power/_51, conv-maxpool-2/W_filter/_53, conv-maxpool-2/beta_filter/_55, conv-maxpool-2/conv-maxpool-2/moments/Squeeze/ExponentialMovingAverage/_57, conv-maxpool-2/conv-maxpool-2/moments/Squeeze_1/ExponentialMovingAverage/_59, 
conv-maxpool-2/conv-maxpool-2_1/moments/Squeeze/ExponentialMovingAverage/_61, conv-maxpool-2/conv-maxpool-2_1/moments/Squeeze_1/ExponentialMovingAverage/_63, conv-maxpool-2_1/W_filter/_65, conv-maxpool-2_1/beta_filter/_67, conv-maxpool-3/W_filter/_69, conv-maxpool-3/beta_filter/_71, conv-maxpool-3/conv-maxpool-3/moments/Squeeze/ExponentialMovingAverage/_73, conv-maxpool-3/conv-maxpool-3/moments/Squeeze_1/ExponentialMovingAverage/_75, conv-maxpool-3/conv-maxpool-3_1/moments/Squeeze/ExponentialMovingAverage/_77, conv-maxpool-3/conv-maxpool-3_1/moments/Squeeze_1/ExponentialMovingAverage/_79, conv-maxpool-3_1/W_filter/_81, conv-maxpool-3_1/beta_filter/_83, conv-maxpool-4/W_filter/_85, conv-maxpool-4/beta_filter/_87, conv-maxpool-4/conv-maxpool-4/moments/Squeeze/ExponentialMovingAverage/_89, conv-maxpool-4/conv-maxpool-4/moments/Squeeze_1/ExponentialMovingAverage/_91, conv-maxpool-4/conv-maxpool-4_1/moments/Squeeze/ExponentialMovingAverage/_93, conv-maxpool-4/conv-maxpool-4_1/moments/Squeeze_1/ExponentialMovingAverage/_95, conv-maxpool-4_1/W_filter/_97, conv-maxpool-4_1/beta_filter/_99, conv-maxpool-5/W_filter/_101, conv-maxpool-5/beta_filter/_103, conv-maxpool-5/conv-maxpool-5/moments/Squeeze/ExponentialMovingAverage/_105, conv-maxpool-5/conv-maxpool-5/moments/Squeeze_1/ExponentialMovingAverage/_107, conv-maxpool-5/conv-maxpool-5_1/moments/Squeeze/ExponentialMovingAverage/_109, conv-maxpool-5/conv-maxpool-5_1/moments/Squeeze_1/ExponentialMovingAverage/_111, conv-maxpool-5_1/W_filter/_113, conv-maxpool-5_1/beta_filter/_115, conv-maxpool-7/W_filter/_117, conv-maxpool-7/beta_filter/_119, conv-maxpool-7/conv-maxpool-7/moments/Squeeze/ExponentialMovingAverage/_121, conv-maxpool-7/conv-maxpool-7/moments/Squeeze_1/ExponentialMovingAverage/_123, conv-maxpool-7/conv-maxpool-7_1/moments/Squeeze/ExponentialMovingAverage/_125, conv-maxpool-7/conv-maxpool-7_1/moments/Squeeze_1/ExponentialMovingAverage/_127, conv-maxpool-7_1/W_filter/_129, conv-maxpool-7_1/beta_filter/_131, 
conv2d/bias/_133, conv2d/bias/Adam/_135, conv2d/bias/Adam_1/_137, conv2d/kernel/_139, conv2d/kernel/Adam/_141, conv2d/kernel/Adam_1/_143, conv2d_1/bias/_145, conv2d_1/bias/Adam/_147, conv2d_1/bias/Adam_1/_149, conv2d_1/kernel/_151, conv2d_1/kernel/Adam/_153, conv2d_1/kernel/Adam_1/_155, conv2d_10/bias/_157, conv2d_10/bias/Adam/_159, conv2d_10/bias/Adam_1/_161, conv2d_10/kernel/_163, conv2d_10/kernel/Adam/_165, conv2d_10/kernel/Adam_1/_167, conv2d_11/bias/_169, conv2d_11/bias/Adam/_171, conv2d_11/bias/Adam_1/_173, conv2d_11/kernel/_175, conv2d_11/kernel/Adam/_177, conv2d_11/kernel/Adam_1/_179, conv2d_12/bias/_181, conv2d_12/bias/Adam/_183, conv2d_12/bias/Adam_1/_185, conv2d_12/kernel/_187, conv2d_12/kernel/Adam/_189, conv2d_12/kernel/Adam_1/_191, conv2d_2/bias/_193, conv2d_2/bias/Adam/_195, conv2d_2/bias/Adam_1/_197, conv2d_2/kernel/_199, conv2d_2/kernel/Adam/_201, conv2d_2/kernel/Adam_1/_203, conv2d_3/bias/_205, conv2d_3/bias/Adam/_207, conv2d_3/bias/Adam_1/_209, conv2d_3/kernel/_211, conv2d_3/kernel/Adam/_213, conv2d_3/kernel/Adam_1/_215, conv2d_4/bias/_217, conv2d_4/bias/Adam/_219, conv2d_4/bias/Adam_1/_221, conv2d_4/kernel/_223, conv2d_4/kernel/Adam/_225, conv2d_4/kernel/Adam_1/_227, conv2d_5/bias/_229, conv2d_5/bias/Adam/_231, conv2d_5/bias/Adam_1/_233, conv2d_5/kernel/_235, conv2d_5/kernel/Adam/_237, conv2d_5/kernel/Adam_1/_239, conv2d_6/bias/_241, conv2d_6/bias/Adam/_243, conv2d_6/bias/Adam_1/_245, conv2d_6/kernel/_247, conv2d_6/kernel/Adam/_249, conv2d_6/kernel/Adam_1/_251, conv2d_7/bias/_253, conv2d_7/bias/Adam/_255, conv2d_7/bias/Adam_1/_257, conv2d_7/kernel/_259, conv2d_7/kernel/Adam/_261, conv2d_7/kernel/Adam_1/_263, conv2d_8/bias/_265, conv2d_8/bias/Adam/_267, conv2d_8/bias/Adam_1/_269, conv2d_8/kernel/_271, conv2d_8/kernel/Adam/_273, conv2d_8/kernel/Adam_1/_275, conv2d_9/bias/_277, conv2d_9/bias/Adam/_279, conv2d_9/bias/Adam_1/_281, conv2d_9/kernel/_283, conv2d_9/kernel/Adam/_285, conv2d_9/kernel/Adam_1/_287, dense/bias/_289, dense/bias/Adam/_291, 
dense/bias/Adam_1/_293, dense/kernel/_295, dense/kernel/Adam/_297, dense/kernel/Adam_1/_299, dense_1/bias/_301, dense_1/bias/Adam/_303, dense_1/bias/Adam_1/_305, dense_1/kernel/_307, dense_1/kernel/Adam/_309, dense_1/kernel/Adam_1/_311, fc-bn-layer/Variable/_313, fc-bn-layer/Weight_fc/_315, fc-bn-layer/fc-bn-layer/moments/Squeeze/ExponentialMovingAverage/_317, fc-bn-layer/fc-bn-layer/moments/Squeeze_1/ExponentialMovingAverage/_319, gate_b1/_321, gate_b1/Adam/_323, gate_b1/Adam_1/_325, gate_b2/_327, gate_b2/Adam/_329, gate_b2/Adam_1/_331, gate_b3/_333, gate_b3/Adam/_335, gate_b3/Adam_1/_337, gate_w1/_339, gate_w1/Adam/_341, gate_w1/Adam_1/_343, gate_w2/_345, gate_w2/Adam/_347, gate_w2/Adam_1/_349, gate_w3/_351, gate_w3/Adam/_353, gate_w3/Adam_1/_355, out_layer/Weight_out/_357, out_layer/bias_out/_359, self_w/_361, self_w/Adam/_363, self_w/Adam_1/_365)]]","\nDuring handling of the above exception, another exception occurred:\n","\u001b[0;31mNotFoundError\u001b[0m                             Traceback (most recent call last)","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\u001b[0m in \u001b[0;36msave\u001b[0;34m(self, sess, save_path, global_step, latest_filename, meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)\u001b[0m\n\u001b[1;32m   1440\u001b[0m               \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msaver_def\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave_tensor_name\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1441\u001b[0;31m               {self.saver_def.filename_tensor_name: checkpoint_file})\n\u001b[0m\u001b[1;32m   1442\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m    928\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 
929\u001b[0;31m                          run_metadata_ptr)\n\u001b[0m\u001b[1;32m    930\u001b[0m       \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1151\u001b[0m       results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m                              feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m   1153\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m   1327\u001b[0m       return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m                            run_metadata)\n\u001b[0m\u001b[1;32m   1329\u001b[0m     \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m   1347\u001b[0m       \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m       \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mNotFoundError\u001b[0m: /home/kesci/work/nn/tf_model_text_cnn_diin; No such file or directory\n\t [[node save_1/SaveV2 (defined at <ipython-input-1-42778d896fb0>:459)  = SaveV2[dtypes=[DT_INT32, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, ..., DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](_arg_save_1/Const_0_0, save_1/SaveV2/tensor_names, save_1/SaveV2/shape_and_slices, Global_Step, beta1_power/_49, beta2_power/_51, conv-maxpool-2/W_filter/_53, conv-maxpool-2/beta_filter/_55, conv-maxpool-2/conv-maxpool-2/moments/Squeeze/ExponentialMovingAverage/_57, conv-maxpool-2/conv-maxpool-2/moments/Squeeze_1/ExponentialMovingAverage/_59, conv-maxpool-2/conv-maxpool-2_1/moments/Squeeze/ExponentialMovingAverage/_61, conv-maxpool-2/conv-maxpool-2_1/moments/Squeeze_1/ExponentialMovingAverage/_63, conv-maxpool-2_1/W_filter/_65, conv-maxpool-2_1/beta_filter/_67, conv-maxpool-3/W_filter/_69, conv-maxpool-3/beta_filter/_71, conv-maxpool-3/conv-maxpool-3/moments/Squeeze/ExponentialMovingAverage/_73, conv-maxpool-3/conv-maxpool-3/moments/Squeeze_1/ExponentialMovingAverage/_75, conv-maxpool-3/conv-maxpool-3_1/moments/Squeeze/ExponentialMovingAverage/_77, conv-maxpool-3/conv-maxpool-3_1/moments/Squeeze_1/ExponentialMovingAverage/_79, conv-maxpool-3_1/W_filter/_81, conv-maxpool-3_1/beta_filter/_83, conv-maxpool-4/W_filter/_85, conv-maxpool-4/beta_filter/_87, conv-maxpool-4/conv-maxpool-4/moments/Squeeze/ExponentialMovingAverage/_89, conv-maxpool-4/conv-maxpool-4/moments/Squeeze_1/ExponentialMovingAverage/_91, conv-maxpool-4/conv-maxpool-4_1/moments/Squeeze/ExponentialMovingAverage/_93, conv-maxpool-4/conv-maxpool-4_1/moments/Squeeze_1/ExponentialMovingAverage/_95, conv-maxpool-4_1/W_filter/_97, conv-maxpool-4_1/beta_filter/_99, 
conv-maxpool-5/W_filter/_101, conv-maxpool-5/beta_filter/_103, conv-maxpool-5/conv-maxpool-5/moments/Squeeze/ExponentialMovingAverage/_105, conv-maxpool-5/conv-maxpool-5/moments/Squeeze_1/ExponentialMovingAverage/_107, conv-maxpool-5/conv-maxpool-5_1/moments/Squeeze/ExponentialMovingAverage/_109, conv-maxpool-5/conv-maxpool-5_1/moments/Squeeze_1/ExponentialMovingAverage/_111, conv-maxpool-5_1/W_filter/_113, conv-maxpool-5_1/beta_filter/_115, conv-maxpool-7/W_filter/_117, conv-maxpool-7/beta_filter/_119, conv-maxpool-7/conv-maxpool-7/moments/Squeeze/ExponentialMovingAverage/_121, conv-maxpool-7/conv-maxpool-7/moments/Squeeze_1/ExponentialMovingAverage/_123, conv-maxpool-7/conv-maxpool-7_1/moments/Squeeze/ExponentialMovingAverage/_125, conv-maxpool-7/conv-maxpool-7_1/moments/Squeeze_1/ExponentialMovingAverage/_127, conv-maxpool-7_1/W_filter/_129, conv-maxpool-7_1/beta_filter/_131, conv2d/bias/_133, conv2d/bias/Adam/_135, conv2d/bias/Adam_1/_137, conv2d/kernel/_139, conv2d/kernel/Adam/_141, conv2d/kernel/Adam_1/_143, conv2d_1/bias/_145, conv2d_1/bias/Adam/_147, conv2d_1/bias/Adam_1/_149, conv2d_1/kernel/_151, conv2d_1/kernel/Adam/_153, conv2d_1/kernel/Adam_1/_155, conv2d_10/bias/_157, conv2d_10/bias/Adam/_159, conv2d_10/bias/Adam_1/_161, conv2d_10/kernel/_163, conv2d_10/kernel/Adam/_165, conv2d_10/kernel/Adam_1/_167, conv2d_11/bias/_169, conv2d_11/bias/Adam/_171, conv2d_11/bias/Adam_1/_173, conv2d_11/kernel/_175, conv2d_11/kernel/Adam/_177, conv2d_11/kernel/Adam_1/_179, conv2d_12/bias/_181, conv2d_12/bias/Adam/_183, conv2d_12/bias/Adam_1/_185, conv2d_12/kernel/_187, conv2d_12/kernel/Adam/_189, conv2d_12/kernel/Adam_1/_191, conv2d_2/bias/_193, conv2d_2/bias/Adam/_195, conv2d_2/bias/Adam_1/_197, conv2d_2/kernel/_199, conv2d_2/kernel/Adam/_201, conv2d_2/kernel/Adam_1/_203, conv2d_3/bias/_205, conv2d_3/bias/Adam/_207, conv2d_3/bias/Adam_1/_209, conv2d_3/kernel/_211, conv2d_3/kernel/Adam/_213, conv2d_3/kernel/Adam_1/_215, conv2d_4/bias/_217, conv2d_4/bias/Adam/_219, 
conv2d_4/bias/Adam_1/_221, conv2d_4/kernel/_223, conv2d_4/kernel/Adam/_225, conv2d_4/kernel/Adam_1/_227, conv2d_5/bias/_229, conv2d_5/bias/Adam/_231, conv2d_5/bias/Adam_1/_233, conv2d_5/kernel/_235, conv2d_5/kernel/Adam/_237, conv2d_5/kernel/Adam_1/_239, conv2d_6/bias/_241, conv2d_6/bias/Adam/_243, conv2d_6/bias/Adam_1/_245, conv2d_6/kernel/_247, conv2d_6/kernel/Adam/_249, conv2d_6/kernel/Adam_1/_251, conv2d_7/bias/_253, conv2d_7/bias/Adam/_255, conv2d_7/bias/Adam_1/_257, conv2d_7/kernel/_259, conv2d_7/kernel/Adam/_261, conv2d_7/kernel/Adam_1/_263, conv2d_8/bias/_265, conv2d_8/bias/Adam/_267, conv2d_8/bias/Adam_1/_269, conv2d_8/kernel/_271, conv2d_8/kernel/Adam/_273, conv2d_8/kernel/Adam_1/_275, conv2d_9/bias/_277, conv2d_9/bias/Adam/_279, conv2d_9/bias/Adam_1/_281, conv2d_9/kernel/_283, conv2d_9/kernel/Adam/_285, conv2d_9/kernel/Adam_1/_287, dense/bias/_289, dense/bias/Adam/_291, dense/bias/Adam_1/_293, dense/kernel/_295, dense/kernel/Adam/_297, dense/kernel/Adam_1/_299, dense_1/bias/_301, dense_1/bias/Adam/_303, dense_1/bias/Adam_1/_305, dense_1/kernel/_307, dense_1/kernel/Adam/_309, dense_1/kernel/Adam_1/_311, fc-bn-layer/Variable/_313, fc-bn-layer/Weight_fc/_315, fc-bn-layer/fc-bn-layer/moments/Squeeze/ExponentialMovingAverage/_317, fc-bn-layer/fc-bn-layer/moments/Squeeze_1/ExponentialMovingAverage/_319, gate_b1/_321, gate_b1/Adam/_323, gate_b1/Adam_1/_325, gate_b2/_327, gate_b2/Adam/_329, gate_b2/Adam_1/_331, gate_b3/_333, gate_b3/Adam/_335, gate_b3/Adam_1/_337, gate_w1/_339, gate_w1/Adam/_341, gate_w1/Adam_1/_343, gate_w2/_345, gate_w2/Adam/_347, gate_w2/Adam_1/_349, gate_w3/_351, gate_w3/Adam/_353, gate_w3/Adam_1/_355, out_layer/Weight_out/_357, out_layer/bias_out/_359, self_w/_361, self_w/Adam/_363, self_w/Adam_1/_365)]]\n\nCaused by op 'save_1/SaveV2', defined at:\n  File \"/opt/conda/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n    \"__main__\", mod_spec)\n  File \"/opt/conda/lib/python3.6/runpy.py\", line 85, in _run_code\n    exec(code, 
run_globals)\n  File \"/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in <module>\n    app.launch_new_instance()\n  File \"/opt/conda/lib/python3.6/site-packages/traitlets/config/application.py\", line 658, in launch_instance\n    app.start()\n  File \"/opt/conda/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 505, in start\n    self.io_loop.start()\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/ioloop.py\", line 832, in start\n    self._run_callback(self._callbacks.popleft())\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/ioloop.py\", line 605, in _run_callback\n    ret = callback()\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/stack_context.py\", line 277, in null_wrapper\n    return fn(*args, **kwargs)\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/gen.py\", line 1152, in inner\n    self.run()\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/gen.py\", line 1069, in run\n    yielded = self.gen.send(value)\n  File \"/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 365, in process_one\n    yield gen.maybe_future(dispatch(*args))\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/gen.py\", line 307, in wrapper\n    yielded = next(result)\n  File \"/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 272, in dispatch_shell\n    yield gen.maybe_future(handler(stream, idents, msg))\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/gen.py\", line 307, in wrapper\n    yielded = next(result)\n  File \"/opt/conda/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 542, in execute_request\n    user_expressions, allow_stdin,\n  File \"/opt/conda/lib/python3.6/site-packages/tornado/gen.py\", line 307, in wrapper\n    yielded = next(result)\n  File \"/opt/conda/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 294, in do_execute\n    res = shell.run_cell(code, store_history=store_history, silent=silent)\n  File 
\"/opt/conda/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n  File \"/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2854, in run_cell\n    raw_cell, store_history, silent, shell_futures)\n  File \"/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2880, in _run_cell\n    return runner(coro)\n  File \"/opt/conda/lib/python3.6/site-packages/IPython/core/async_helpers.py\", line 68, in _pseudo_sync_runner\n    coro.send(None)\n  File \"/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3057, in run_cell_async\n    interactivity=interactivity, compiler=compiler, result=result)\n  File \"/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3248, in run_ast_nodes\n    if (await self.run_code(code, result,  async_=asy)):\n  File \"/opt/conda/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 3325, in run_code\n    exec(code_obj, self.user_global_ns, self.user_ns)\n  File \"<ipython-input-1-42778d896fb0>\", line 459, in <module>\n    saver = tf.train.Saver(max_to_keep = 3)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1102, in __init__\n    self.build()\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1114, in build\n    self._build(self._filename, build_save=True, build_restore=True)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1151, in _build\n    build_save=build_save, build_restore=build_restore)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 792, in _build_internal\n    save_tensor = self._AddSaveOps(filename_tensor, saveables)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 284, in _AddSaveOps\n    save = 
self.save_op(filename_tensor, saveables)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 202, in save_op\n    tensors)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/gen_io_ops.py\", line 1690, in save_v2\n    shape_and_slices=shape_and_slices, tensors=tensors, name=name)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py\", line 787, in _apply_op_helper\n    op_def=op_def)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n    return func(*args, **kwargs)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3274, in create_op\n    op_def=op_def)\n  File \"/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n    self._traceback = tf_stack.extract_stack()\n\nNotFoundError (see above for traceback): /home/kesci/work/nn/tf_model_text_cnn_diin; No such file or directory\n\t [[node save_1/SaveV2 (defined at <ipython-input-1-42778d896fb0>:459)  = SaveV2[dtypes=[DT_INT32, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, ..., DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](_arg_save_1/Const_0_0, save_1/SaveV2/tensor_names, save_1/SaveV2/shape_and_slices, Global_Step, beta1_power/_49, beta2_power/_51, conv-maxpool-2/W_filter/_53, conv-maxpool-2/beta_filter/_55, conv-maxpool-2/conv-maxpool-2/moments/Squeeze/ExponentialMovingAverage/_57, conv-maxpool-2/conv-maxpool-2/moments/Squeeze_1/ExponentialMovingAverage/_59, conv-maxpool-2/conv-maxpool-2_1/moments/Squeeze/ExponentialMovingAverage/_61, conv-maxpool-2/conv-maxpool-2_1/moments/Squeeze_1/ExponentialMovingAverage/_63, conv-maxpool-2_1/W_filter/_65, conv-maxpool-2_1/beta_filter/_67, conv-maxpool-3/W_filter/_69, conv-maxpool-3/beta_filter/_71, conv-maxpool-3/conv-maxpool-3/moments/Squeeze/ExponentialMovingAverage/_73, 
conv-maxpool-3/conv-maxpool-3/moments/Squeeze_1/ExponentialMovingAverage/_75, conv-maxpool-3/conv-maxpool-3_1/moments/Squeeze/ExponentialMovingAverage/_77, conv-maxpool-3/conv-maxpool-3_1/moments/Squeeze_1/ExponentialMovingAverage/_79, conv-maxpool-3_1/W_filter/_81, conv-maxpool-3_1/beta_filter/_83, conv-maxpool-4/W_filter/_85, conv-maxpool-4/beta_filter/_87, conv-maxpool-4/conv-maxpool-4/moments/Squeeze/ExponentialMovingAverage/_89, conv-maxpool-4/conv-maxpool-4/moments/Squeeze_1/ExponentialMovingAverage/_91, conv-maxpool-4/conv-maxpool-4_1/moments/Squeeze/ExponentialMovingAverage/_93, conv-maxpool-4/conv-maxpool-4_1/moments/Squeeze_1/ExponentialMovingAverage/_95, conv-maxpool-4_1/W_filter/_97, conv-maxpool-4_1/beta_filter/_99, conv-maxpool-5/W_filter/_101, conv-maxpool-5/beta_filter/_103, conv-maxpool-5/conv-maxpool-5/moments/Squeeze/ExponentialMovingAverage/_105, conv-maxpool-5/conv-maxpool-5/moments/Squeeze_1/ExponentialMovingAverage/_107, conv-maxpool-5/conv-maxpool-5_1/moments/Squeeze/ExponentialMovingAverage/_109, conv-maxpool-5/conv-maxpool-5_1/moments/Squeeze_1/ExponentialMovingAverage/_111, conv-maxpool-5_1/W_filter/_113, conv-maxpool-5_1/beta_filter/_115, conv-maxpool-7/W_filter/_117, conv-maxpool-7/beta_filter/_119, conv-maxpool-7/conv-maxpool-7/moments/Squeeze/ExponentialMovingAverage/_121, conv-maxpool-7/conv-maxpool-7/moments/Squeeze_1/ExponentialMovingAverage/_123, conv-maxpool-7/conv-maxpool-7_1/moments/Squeeze/ExponentialMovingAverage/_125, conv-maxpool-7/conv-maxpool-7_1/moments/Squeeze_1/ExponentialMovingAverage/_127, conv-maxpool-7_1/W_filter/_129, conv-maxpool-7_1/beta_filter/_131, conv2d/bias/_133, conv2d/bias/Adam/_135, conv2d/bias/Adam_1/_137, conv2d/kernel/_139, conv2d/kernel/Adam/_141, conv2d/kernel/Adam_1/_143, conv2d_1/bias/_145, conv2d_1/bias/Adam/_147, conv2d_1/bias/Adam_1/_149, conv2d_1/kernel/_151, conv2d_1/kernel/Adam/_153, conv2d_1/kernel/Adam_1/_155, conv2d_10/bias/_157, conv2d_10/bias/Adam/_159, conv2d_10/bias/Adam_1/_161, 
conv2d_10/kernel/_163, conv2d_10/kernel/Adam/_165, conv2d_10/kernel/Adam_1/_167, conv2d_11/bias/_169, conv2d_11/bias/Adam/_171, conv2d_11/bias/Adam_1/_173, conv2d_11/kernel/_175, conv2d_11/kernel/Adam/_177, conv2d_11/kernel/Adam_1/_179, conv2d_12/bias/_181, conv2d_12/bias/Adam/_183, conv2d_12/bias/Adam_1/_185, conv2d_12/kernel/_187, conv2d_12/kernel/Adam/_189, conv2d_12/kernel/Adam_1/_191, conv2d_2/bias/_193, conv2d_2/bias/Adam/_195, conv2d_2/bias/Adam_1/_197, conv2d_2/kernel/_199, conv2d_2/kernel/Adam/_201, conv2d_2/kernel/Adam_1/_203, conv2d_3/bias/_205, conv2d_3/bias/Adam/_207, conv2d_3/bias/Adam_1/_209, conv2d_3/kernel/_211, conv2d_3/kernel/Adam/_213, conv2d_3/kernel/Adam_1/_215, conv2d_4/bias/_217, conv2d_4/bias/Adam/_219, conv2d_4/bias/Adam_1/_221, conv2d_4/kernel/_223, conv2d_4/kernel/Adam/_225, conv2d_4/kernel/Adam_1/_227, conv2d_5/bias/_229, conv2d_5/bias/Adam/_231, conv2d_5/bias/Adam_1/_233, conv2d_5/kernel/_235, conv2d_5/kernel/Adam/_237, conv2d_5/kernel/Adam_1/_239, conv2d_6/bias/_241, conv2d_6/bias/Adam/_243, conv2d_6/bias/Adam_1/_245, conv2d_6/kernel/_247, conv2d_6/kernel/Adam/_249, conv2d_6/kernel/Adam_1/_251, conv2d_7/bias/_253, conv2d_7/bias/Adam/_255, conv2d_7/bias/Adam_1/_257, conv2d_7/kernel/_259, conv2d_7/kernel/Adam/_261, conv2d_7/kernel/Adam_1/_263, conv2d_8/bias/_265, conv2d_8/bias/Adam/_267, conv2d_8/bias/Adam_1/_269, conv2d_8/kernel/_271, conv2d_8/kernel/Adam/_273, conv2d_8/kernel/Adam_1/_275, conv2d_9/bias/_277, conv2d_9/bias/Adam/_279, conv2d_9/bias/Adam_1/_281, conv2d_9/kernel/_283, conv2d_9/kernel/Adam/_285, conv2d_9/kernel/Adam_1/_287, dense/bias/_289, dense/bias/Adam/_291, dense/bias/Adam_1/_293, dense/kernel/_295, dense/kernel/Adam/_297, dense/kernel/Adam_1/_299, dense_1/bias/_301, dense_1/bias/Adam/_303, dense_1/bias/Adam_1/_305, dense_1/kernel/_307, dense_1/kernel/Adam/_309, dense_1/kernel/Adam_1/_311, fc-bn-layer/Variable/_313, fc-bn-layer/Weight_fc/_315, fc-bn-layer/fc-bn-layer/moments/Squeeze/ExponentialMovingAverage/_317, 
fc-bn-layer/fc-bn-layer/moments/Squeeze_1/ExponentialMovingAverage/_319, gate_b1/_321, gate_b1/Adam/_323, gate_b1/Adam_1/_325, gate_b2/_327, gate_b2/Adam/_329, gate_b2/Adam_1/_331, gate_b3/_333, gate_b3/Adam/_335, gate_b3/Adam_1/_337, gate_w1/_339, gate_w1/Adam/_341, gate_w1/Adam_1/_343, gate_w2/_345, gate_w2/Adam/_347, gate_w2/Adam_1/_349, gate_w3/_351, gate_w3/Adam/_353, gate_w3/Adam_1/_355, out_layer/Weight_out/_357, out_layer/bias_out/_359, self_w/_361, self_w/Adam/_363, self_w/Adam_1/_365)]]\n","\nDuring handling of the above exception, another exception occurred:\n","\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)","\u001b[0;32m<ipython-input-1-42778d896fb0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m    458\u001b[0m         \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"per_vaild_loss_avg:\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mper_vaild_loss_avg\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"************\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    459\u001b[0m         \u001b[0msaver\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSaver\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmax_to_keep\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 460\u001b[0;31m         \u001b[0msaver\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msaver\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msave\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"/home/kesci/work/nn/tf_model_text_cnn_diin/rnn\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mglobal_step\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m","\u001b[0;32m/opt/conda/lib/python3.6/site-packages/tensorflow/python/training/saver.py\u001b[0m in 
\u001b[0;36msave\u001b[0;34m(self, sess, save_path, global_step, latest_filename, meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)\u001b[0m\n\u001b[1;32m   1456\u001b[0m               \"Parent directory of {} doesn't exist, can't save.\".format(\n\u001b[1;32m   1457\u001b[0m                   save_path))\n\u001b[0;32m-> 1458\u001b[0;31m         \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1459\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1460\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mwrite_meta_graph\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mValueError\u001b[0m: Parent directory of /home/kesci/work/nn/tf_model_text_cnn_diin/rnn doesn't exist, can't save."]}],"execution_count":1,"source":"# -*- coding:utf-8 -*-\r\n\r\nimport os\r\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"  \r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\r\n\r\nimport tensorflow as tf\r\n\r\nfrom tensorflow.contrib import rnn\r\n\r\nimport tensorflow.contrib.layers as layers\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import StratifiedShuffleSplit\r\n\r\nimport tensorflow as tf\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom gensim.models import Word2Vec\r\n\r\n\"\"\"wd_5_bigru_cnn\r\n\r\n在论文 Recurrent Convolutional Neural Networks for Text Classification 中。\r\n\r\n使用 BiRNN 处理，将每个时刻的隐藏状态和原输入拼起来，在进行 max_pooling 操作。\r\n\r\n这里有些不同，首先也是使用 bigru 得到每个时刻的，将每个时刻的隐藏状态和原输入拼起来；\r\n\r\n然后使用输入到 TextCNN 网络中。\r\n\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\nclass Settings(object):\r\n\r\n    def __init__(self):\r\n\r\n\r\n        self.model_name = 'wd_1_2_cnn_max'\r\n        self.query_len = 20\r\n        self.title_len = 20\r\n        self.filter_sizes = [2, 3, 4, 5, 7]\r\n        self.n_filter = 256\r\n        self.fc_hidden_size = 512\r\n        self.n_class = 1\r\n        self.train_batch_size=1024\r\n        
self.vaild_batch_size=5000
        self.lr=0.01  # initial learning rate for the exponential-decay schedule
        self.decay_step=7000
        self.decay_rate=0.8
        self.rnn_hidden_units=120
        self.d=100  # embedding dimensionality used by the DIIN-style branch
        self.dense_eta = 0.3  # DenseNet first-conv filter ratio (filters = d * eta)
        self.dense_g = 20  # DenseNet growth rate (channels added per dense conv)
        self.dense_n = 8
        self.dense_theta = 0.5  # DenseNet transition-layer compression factor




class TEXTCNN(object):
    # Combined model: a DIIN-style interaction branch (encode/dense_net) plus a
    # TextCNN branch over the same query/title embeddings.  NOTE(review): only
    # the DIIN branch's logit actually reaches the loss (see out_layer below).

    def __init__(self,  settings):


        self.model_name = settings.model_name
        self.query_len = settings.query_len
        self.title_len = settings.title_len
        self.filter_sizes = settings.filter_sizes
        self.n_filter = settings.n_filter
        # total feature width of the TextCNN branch (one pooled map per filter size)
        self.n_filter_total = self.n_filter * len(self.filter_sizes)
        self.n_class = settings.n_class
        self.rnn_hidden_units=settings.rnn_hidden_units
        self.fc_hidden_size = settings.fc_hidden_size
        self._global_step = tf.Variable(0, trainable=False, name='Global_Step')
        self.update_emas = list()  # batch-norm moving-average update ops, filled by batchnorm()
        # placeholders
        self._tst = tf.placeholder(tf.bool)  # True at test time: batchnorm then uses moving averages
        self._keep_prob = tf.placeholder(tf.float32, [])
        self._batch_size = tf.placeholder(tf.int32, [])
        self.embedding_size=100
        self.d=settings.d
        self.dense_eta = settings.dense_eta
        self.dense_g = settings.dense_g
        self.dense_n = settings.dense_n
        self.dense_theta = settings.dense_theta

        with tf.name_scope('Inputs'):
            # pre-embedded inputs: (batch, seq_len, embedding_size)
            self.embedded_x_query=tf.placeholder(tf.float32,[None,self.query_len,self.embedding_size])
            self.embedded_x_title=tf.placeholder(tf.float32,[None,self.title_len,self.embedding_size])
            self._y_inputs = tf.placeholder(tf.float32, [None, self.n_class], name='y_input')
            

        # self-attention projection; the hard-coded 20 appears to match
        # query_len/title_len (softmax(attention) is matmul-ed with v in
        # encode()) -- TODO confirm and derive from settings instead
        self.self_w = tf.get_variable(name='self_w', shape=(self.d * 3, 20))
        self.gate_w1 = tf.get_variable(name='gate_w1', shape=(self.d * 2, 
self.d))
        self.gate_w2 = tf.get_variable(name='gate_w2', shape=(self.d * 2, self.d))
        self.gate_w3 = tf.get_variable(name='gate_w3', shape=(self.d * 2, self.d))
        self.gate_b1 = tf.get_variable(name='gate_b1', shape=(self.d,))
        self.gate_b2 = tf.get_variable(name='gate_b2', shape=(self.d,))
        self.gate_b3 = tf.get_variable(name='gate_b3', shape=(self.d,))
        
        
        p = self.embedded_x_query  # premise-like side (query)
        h = self.embedded_x_title  # hypothesis-like side (title)
        

        # Encoding Layer: gated self-attention over each side independently
        with tf.variable_scope('p_encode', reuse=None):
            p_encode = self.encode(p)
        with tf.variable_scope('h_encode', reuse=None):
            h_encode = self.encode(h)
        p_encode = self.dropout(p_encode)
        h_encode = self.dropout(h_encode)

        # Interaction Layer: element-wise product over the outer pairing of positions
        I = tf.multiply(tf.expand_dims(p_encode, axis=2), tf.expand_dims(h_encode, axis=1))

        # Feature Extraction Layer (DenseNet over the interaction tensor)
        dense_out = self.dense_net(I)
        dense_out = self.dropout(dense_out)

        # Output Layer: flatten and project to a single logit
        dense_out = tf.reshape(dense_out, shape=(-1, dense_out.shape[1] * dense_out.shape[2] * dense_out.shape[3]))
        out = tf.layers.dense(dense_out, 256)
        out = self.dropout(out)
        self.distance = tf.layers.dense(out, 1)
        
        # --- TextCNN branch (NOTE(review): its prediction is discarded below) ---
        output_title = self.cnn_inference(self.embedded_x_query, self.query_len)
        output_title = tf.expand_dims(output_title, 0)

        output_content = self.cnn_inference(self.embedded_x_title, self.title_len)
        output_content = tf.expand_dims(output_content, 0)
        

        with tf.variable_scope('fc-bn-layer'):
            # element-wise max over the two branches, then FC + batch-norm + relu + dropout
            output = tf.concat([output_title, output_content], axis=0)
            output = tf.reduce_max(output, axis=0)
            W_fc = self.weight_variable([self.n_filter_total, self.fc_hidden_size], name='Weight_fc')
            h_fc = tf.matmul(output, W_fc, name='h_fc')
            beta_fc = tf.Variable(tf.constant(0.1, tf.float32, shape=[self.fc_hidden_size], name="beta_fc"))
            fc_bn, update_ema_fc = self.batchnorm(h_fc, beta_fc, convolutional=False)
            self.update_emas.append(update_ema_fc)
            self.fc_bn_relu = tf.nn.relu(fc_bn, name="relu")
            fc_bn_drop = tf.nn.dropout(self.fc_bn_relu, self.keep_prob)

        with tf.variable_scope('out_layer'):
            W_out = self.weight_variable([self.fc_hidden_size, self.n_class], name='Weight_out')
            b_out = self.bias_variable([self.n_class], name='bias_out')
            self._y_pred = tf.nn.xw_plus_b(fc_bn_drop, W_out, b_out, name='y_pred')  # per-class scores
            # NOTE(review): the TextCNN prediction above is immediately
            # overwritten -- only the DIIN branch's logit feeds the loss.
            # Confirm whether discarding the CNN branch is intended.
            self._y_pred = self.distance
        with tf.name_scope('loss'):
            self._loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(logits=self._y_pred, labels=self._y_inputs))
            self._y_pred = tf.sigmoid(self._y_pred)  # expose probabilities, not raw logits

    # --- read-only accessors for placeholders / graph tensors ---
    @property
    def tst(self):
        return self._tst

    @property
    def keep_prob(self):
        return self._keep_prob

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def global_step(self):
        return self._global_step

    @property
    def X1_inputs(self):
        # NOTE(review): self._X1_inputs is never assigned in __init__;
        # accessing this property would raise AttributeError.
        return self._X1_inputs

    @property
    def X2_inputs(self):
        # NOTE(review): self._X2_inputs is never assigned in __init__ either.
        return self._X2_inputs

    @property
    def y_inputs(self):
        return self._y_inputs

    @property
    def y_pred(self):
        return self._y_pred

    @property
    def loss(self):
        return self._loss

    def weight_variable(self, shape, name):
        """Create a weight variable with appropriate initialization."""
        initial = tf.truncated_normal(shape, stddev=0.1)
        return 
tf.Variable(initial, name=name)

    def bias_variable(self, shape, name):
        """Create a bias variable with appropriate initialization."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial, name=name)

    def batchnorm(self, Ylogits, offset, convolutional=False):
        """Batch normalization with exponential-moving-average statistics.

        Args:
            Ylogits: a dense activation, or a convolution output when
                `convolutional` is True.
            offset: the beta shift term; initialised to 0.1 before RELU here.
                (No scale/gamma is applied -- it adds little with RELU.)
            convolutional: if True, moments are taken over batch+spatial axes
                [0, 1, 2]; otherwise over the batch axis only.
        Returns:
            Ybn: tensor shaped like Ylogits, batch-normalized.
            update_moving_everages: op updating the moving mean/variance so
                that test time (self.tst == True) can use the averages.
        """
        exp_moving_avg = tf.train.ExponentialMovingAverage(0.999,
                                                           self._global_step)  # adding the iteration prevents from averaging across non-existing iterations
        bnepsilon = 1e-5  # guards against division by zero
        if convolutional:
            mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])
        else:
            mean, variance = tf.nn.moments(Ylogits, [0])
        update_moving_everages = exp_moving_avg.apply([mean, variance])
        # moving averages at test time, batch statistics while training
        m = tf.cond(self.tst, lambda: exp_moving_avg.average(mean), lambda: mean)
        v = tf.cond(self.tst, lambda: exp_moving_avg.average(variance), lambda: variance)
        Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, bnepsilon)
        return Ybn, update_moving_everages



    def cnn_inference(self, X_inputs, n_step):
        """TextCNN feature extractor.

        Args:
            X_inputs: embedded input, shape (batch_size, n_step, embedding_size).
            n_step: sequence length, used to size the max-pooling window.
        Returns:
            tensor of shape (batch_size, self.n_filter_total).
        """
        inputs = X_inputs
        inputs = 
tf.expand_dims(inputs, -1)
        pooled_outputs = list()
        for i, filter_size in enumerate(self.filter_sizes):
            with tf.variable_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, self.embedding_size, 1, self.n_filter]
                W_filter = self.weight_variable(shape=filter_shape, name='W_filter')
                beta = self.bias_variable(shape=[self.n_filter], name='beta_filter')
                conv = tf.nn.conv2d(inputs, W_filter, strides=[1, 1, 1, 1], padding="VALID", name="conv")
                conv_bn, update_ema = self.batchnorm(conv, beta, convolutional=True)  # BN before the activation
                # Apply nonlinearity; batch norm scaling is not useful with relus.
                # The batch norm offset is used instead of a bias term.
                h = tf.nn.relu(conv_bn, name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(h, ksize=[1, n_step - filter_size + 1, 1, 1],
                                        strides=[1, 1, 1, 1], padding='VALID', name="pool")
                pooled_outputs.append(pooled)
                self.update_emas.append(update_ema)
        h_pool = tf.concat(pooled_outputs, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, self.n_filter_total])
        return h_pool_flat  # shape = [batch_size, self.n_filter_total]



    def dropout(self, x):
        # dropout driven by the shared keep_prob placeholder (feed 1.0 to disable)
        return tf.nn.dropout(x, self._keep_prob)

    def encode(self, v):
        """Gated self-attention encoding of one embedded sequence.

        attention projects [v; v; v*v] through self_w (d*3 x 20); softmax of
        the result is matmul-ed against v, so the 20 output columns must line
        up with the sequence length -- assumes seq_len == 20, TODO confirm.
        """
        attention = tf.einsum("ijk,kl->ijl", tf.concat((v, v, tf.multiply(v, v)), axis=-1), self.self_w)
        v_hat = tf.matmul(tf.nn.softmax(attention), v)

        p_concat = tf.concat((v, v_hat), axis=-1)
        # z: candidate state; r and f: sigmoid gates mixing v with z below
        z = tf.nn.tanh(tf.einsum("ijk,kl->ijl", p_concat, self.gate_w1) + self.gate_b1)
        r = tf.nn.sigmoid(tf.einsum("ijk,kl->ijl", p_concat, 
self.gate_w2) + self.gate_b2)\r\n        f = tf.nn.sigmoid(tf.einsum(\"ijk,kl->ijl\", p_concat, self.gate_w3) + self.gate_b3)\r\n        res = tf.multiply(r, v) + tf.multiply(f, z)\r\n        return res\r\n    \r\n    def dense_net(self, v):\r\n        filters = self.d * self.dense_eta\r\n        v_in = tf.layers.conv2d(v, filters=filters, kernel_size=(1, 1))\r\n        for _ in range(2):\r\n            for _ in range(5):\r\n                v_out = tf.layers.conv2d(v_in,\r\n                                         filters=self.dense_g,\r\n                                         kernel_size=(3, 3),\r\n                                         padding='SAME',\r\n                                         activation='relu')\r\n                v_in = tf.concat((v_in, v_out), axis=-1)\r\n            transition = tf.layers.conv2d(v_in,\r\n                                          filters=int(v_in.shape[-1].value * self.dense_theta),\r\n                                          kernel_size=(1, 1))\r\n            transition_out = tf.layers.max_pooling2d(transition,\r\n                                                     pool_size=(2, 2),\r\n                                                     strides=2)\r\n            v_in = transition_out\r\n        return v_in\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef loader(data_store_path=''):############\r\n    # all_data=pd.read_csv(data_store_path+\"data_sets/train_feature_second_no_query_and_title_data.csv\")\r\n    # query_title=pd.read_csv(data_store_path+\"data_sets/train_data.csv\",usecols=['query','title'])\r\n    print(\"开始加载数据\")\r\n    all_data=pd.read_pickle(data_store_path+\"/home/kesci/query_title_temp.pickle\")\r\n    data_split=StratifiedShuffleSplit(n_splits=2,test_size=0.001,random_state=1)\r\n    train_index,vaild_index=data_split.split(all_data['label'],all_data['label']).__next__()\r\n    train_data=all_data.iloc[train_index]\r\n    vaild_data=all_data.iloc[vaild_index]\r\n    print(\"加载数据完成\")\r\n    return 
train_data,vaild_data\r\n\r\ndef batch_iter(data_x,data_y, batch_size, num_epochs, shuffle=True):#这个就是产生batch的，可以直接用\r\n    \"\"\"\r\n    Generates a batch iterator for a dataset.\r\n    \"\"\"\r\n    # data_x=data_x.tolist()\r\n    data=list(zip(data_x,data_y))\r\n    data=np.array(data)\r\n    data_size = len(data)\r\n    num_batches_per_epoch = int((len(data)-1)/batch_size) + 1\r\n    for epoch in range(num_epochs):\r\n        # Shuffle the data at each epoch\r\n        if shuffle:#如果为true 则代表允许随机取数据\r\n            shuffle_indices = np.random.permutation(np.arange(data_size))\r\n            shuffled_data = data[shuffle_indices]#随机取数据\r\n        else:\r\n            shuffled_data = data\r\n        for batch_num in range(num_batches_per_epoch):\r\n            start_index = batch_num * batch_size\r\n            end_index = min((batch_num + 1) * batch_size, data_size)\r\n            yield shuffled_data[start_index:end_index]#而不用一个个加到列表里了。这是一个batch，嗯哼\r\n            #取到的维度为（batchsize,2） y为(batchsize,1) 里面存的是序号\r\n\r\ndef get_w2v_array(word_list,max_len):\r\n    array = np.zeros((max_len, 100))\r\n    if len(word_list)<=max_len:\r\n        for i in range(len(word_list)):\r\n            if str(word_list[i]) in word_wv.vocab.keys():\r\n                array[i][:] = word_wv[str(word_list[i])]\r\n    else:\r\n        for i in range(max_len):\r\n            if str(word_list[i]) in word_wv.vocab.keys():\r\n                array[i][:] = word_wv[str(word_list[i])]\r\n    return array\r\n\r\n\r\nsettings = Settings()\r\n\r\ntrain_batches,vaild_data=loader()\r\n\r\ntrain_batches=np.array(train_batches)\r\ntrain_batches_y=train_batches[:,2]\r\ntrain_batches_x=np.delete(train_batches, 2, axis=1)######这里是除了label以外的维数\r\n\r\n\r\nbatches=batch_iter(train_batches_x,train_batches_y,settings.train_batch_size,4)\r\n\r\nvaild_batches=np.array(vaild_data)\r\nvaild_batches_y=vaild_batches[:,2]\r\nvaild_batches_x=np.delete(vaild_batches, 2, 
axis=1)\r\nvaild_data=batch_iter(vaild_batches_x,vaild_batches_y,settings.vaild_batch_size,1000)\r\n\r\nprint(\"train starting\")\r\nw2v_model = Word2Vec.load('w2v_model/w2v_all_data_model.txt')\r\nword_wv= w2v_model.wv\r\n\r\n\r\nsettings = Settings()\r\n\r\nsettings.all_features_num=train_batches_x.shape[1]-2###########自动设置数值型特征数量\r\nmodel = TEXTCNN( settings)\r\n\r\nlearning_rate = tf.train.exponential_decay(settings.lr, model.global_step, settings.decay_step,\r\n                                       settings.decay_rate, staircase=True)\r\noptimizer = tf.train.AdamOptimizer(learning_rate)\r\ngrads_and_vars=optimizer.compute_gradients(model.loss)\r\ntrain_op=optimizer.apply_gradients(grads_and_vars,global_step=model.global_step)\r\nsess = tf.Session() #启动创建的模型\r\nsess.run(tf.initialize_all_variables())\r\n\r\nsaver = tf.train.Saver()\r\n# saver.restore(sess, tf.train.latest_checkpoint('tf_model_rcnn/'))\r\nper_train_loss_avg_list=[]\r\nper_vaild_loss_avg_list=[]\r\nparts_time=[]\r\nfrom time import time\r\nfor batch in batches:\r\n    time1=time()\r\n    batch_x,batch_y=zip(*batch)\r\n    batch_y=np.array(batch_y)[:,None]\r\n    batch_x=[i.tolist() for i in batch_x]\r\n    batch_x=pd.DataFrame(batch_x)\r\n    batch_x[[0,1]]=batch_x[[0,1]].applymap(lambda x:x.split(' '))\r\n    batch_x=np.array(batch_x)\r\n    #####[None,title_len,embedding_size]\r\n    #np.apply_along_axis(lambda x:[5,5,5,5,5] if x[0]==2 else x,axis=2,arr=a)\r\n    #query   title   title_count         title_ctr      label\r\n    batch_x_query=np.apply_along_axis(lambda x:get_w2v_array(x[0],settings.query_len),axis=1,arr=batch_x)\r\n    batch_x_title=np.apply_along_axis(lambda x:get_w2v_array(x[1],settings.title_len),axis=1,arr=batch_x)\r\n    #     train_op=optimizer.apply_gradients(grads_and_vars,global_step=global_step)                              \r\n    feed_dict = {model.embedded_x_query: batch_x_query,\r\n                 model.embedded_x_title: batch_x_title,\r\n                 
model.y_inputs: batch_y,\r\n                 model.batch_size: batch_x.shape[0], \r\n                 model.tst: False, \r\n                 model.keep_prob: 0.8}\r\n\r\n    _,step,loss_out=sess.run([train_op,model.global_step,model.loss],feed_dict)\r\n    time2=time()\r\n    parts_time.append(time2-time1)\r\n    if step%10==0:\r\n        print(\"step {},loss:{:g}\".format(step,loss_out))              \r\n    per_train_loss_avg_list.append(loss_out)\r\n    \r\n    \r\n    if step%(int(50000000/1004))==0:\r\n        print(\"batch ended\")\r\n    if step%500==0:\r\n        time_single_part=sum(parts_time)\r\n        ed_time=(50000000/settings.train_batch_size-step)/500*time_single_part\r\n        print(\"距离训练完一整轮剩余时间：\",ed_time/60,\" 分钟\")\r\n        parts_time=[]\r\n        vaild_x,vaild_y=zip(*(vaild_data.__next__()))\r\n        vaild_y=np.array(vaild_y)[:,None]\r\n        vaild_x=[i.tolist() for i in vaild_x]\r\n        vaild_x=pd.DataFrame(vaild_x)\r\n        vaild_x[[0,1]]=vaild_x[[0,1]].applymap(lambda x:x.split(' '))\r\n        vaild_x=np.array(vaild_x)\r\n    \r\n        vaild_x_query=np.apply_along_axis(lambda x:get_w2v_array(x[0],settings.query_len),axis=1,arr=vaild_x)\r\n        vaild_x_title=np.apply_along_axis(lambda x:get_w2v_array(x[1],settings.title_len),axis=1,arr=vaild_x)\r\n\r\n        feed_dict = {model.embedded_x_query: vaild_x_query,\r\n                     model.embedded_x_title: vaild_x_title,\r\n                     model.y_inputs: vaild_y,\r\n                     model.batch_size: vaild_x.shape[0], \r\n                     model.tst: False, \r\n                     model.keep_prob: 1}\r\n    \r\n        step,loss_out=sess.run([model.global_step,model.loss],feed_dict)\r\n        per_vaild_loss_avg_list.append(loss_out)\r\n        per_train_loss_avg=sum(per_train_loss_avg_list)/len(per_train_loss_avg_list)\r\n        print(\"per_train_loss_avg:\",per_train_loss_avg,\"**********\")\r\n        per_train_loss_avg_list=[]\r\n        
print(\"vaild_loss:\",loss_out)\r\n    if step%5000==0:\r\n        per_vaild_loss_avg=sum(per_vaild_loss_avg_list)/len(per_vaild_loss_avg_list)\r\n        per_vaild_loss_avg_list=[]\r\n        print(\"per_vaild_loss_avg:\",per_vaild_loss_avg,\"************\")\r\n        saver = tf.train.Saver(max_to_keep = 3)\r\n        saver = saver.save(sess, \"/home/kesci/work/nn/tf_model_text_cnn_diin/rnn\", global_step=step)","cell_type":"code","metadata":{"trusted":true,"collapsed":false,"id":"3B297DBF07C543719D2255C1EC98FDB1","scrolled":true}},{"metadata":{"id":"3372939A06AE45E782FA0DDB6E35A4FB","collapsed":false},"cell_type":"code","outputs":[{"output_type":"stream","text":"step 5010,loss:0.451126\nstep 5020,loss:0.476293\nstep 5030,loss:0.476722\nstep 5040,loss:0.451962\nstep 5050,loss:0.476255\nstep 5060,loss:0.481179\nstep 5070,loss:0.496338\nstep 5080,loss:0.434733\nstep 5090,loss:0.445505\nstep 5100,loss:0.49927\nstep 5110,loss:0.477132\nstep 5120,loss:0.511242\nstep 5130,loss:0.454528\nstep 5140,loss:0.467704\nstep 5150,loss:0.462776\nstep 5160,loss:0.4611\nstep 5170,loss:0.46712\nstep 5180,loss:0.458779\nstep 5190,loss:0.467931\nstep 5200,loss:0.502758\nstep 5210,loss:0.446557\nstep 5220,loss:0.469266\nstep 5230,loss:0.495359\nstep 5240,loss:0.461713\nstep 5250,loss:0.473753\nstep 5260,loss:0.476298\nstep 5270,loss:0.450235\nstep 5280,loss:0.459845\nstep 5290,loss:0.442475\nstep 5300,loss:0.439707\nstep 5310,loss:0.46714\nstep 5320,loss:0.431232\nstep 5330,loss:0.470107\nstep 5340,loss:0.463427\nstep 5350,loss:0.466058\nstep 5360,loss:0.450326\nstep 5370,loss:0.463515\nstep 5380,loss:0.463677\nstep 5390,loss:0.454099\nstep 5400,loss:0.46403\nstep 5410,loss:0.476057\nstep 5420,loss:0.464168\nstep 5430,loss:0.435726\nstep 5440,loss:0.455622\nstep 5450,loss:0.448969\nstep 5460,loss:0.502296\nstep 5470,loss:0.475308\nstep 5480,loss:0.455801\nstep 5490,loss:0.480662\nstep 5500,loss:0.468888\n距离训练完一整轮剩余时间： 328.8926888062308  分钟\nper_train_loss_avg: 3.413480639266048 
**********\nvaild_loss: 0.47608462\nstep 5510,loss:0.454565\nstep 5520,loss:0.506936\nstep 5530,loss:0.4599\nstep 5540,loss:0.472829\nstep 5550,loss:0.468907\nstep 5560,loss:0.448694\nstep 5570,loss:0.457573\nstep 5580,loss:0.489682\nstep 5590,loss:0.446973\nstep 5600,loss:0.418248\nstep 5610,loss:0.477217\nstep 5620,loss:0.446088\nstep 5630,loss:0.46035\nstep 5640,loss:0.47479\nstep 5650,loss:0.466485\nstep 5660,loss:0.44008\nstep 5670,loss:0.459594\nstep 5680,loss:0.460788\nstep 5690,loss:0.465764\nstep 5700,loss:0.424706\nstep 5710,loss:0.466379\nstep 5720,loss:0.495129\nstep 5730,loss:0.46996\nstep 5740,loss:0.461649\nstep 5750,loss:0.489088\nstep 5760,loss:0.432546\nstep 5770,loss:0.477655\nstep 5780,loss:0.470471\nstep 5790,loss:0.437154\nstep 5800,loss:0.452875\nstep 5810,loss:0.442279\nstep 5820,loss:0.47144\nstep 5830,loss:0.459897\nstep 5840,loss:0.467102\nstep 5850,loss:0.483034\nstep 5860,loss:0.464171\nstep 5870,loss:0.487813\nstep 5880,loss:0.482004\nstep 5890,loss:0.50534\nstep 5900,loss:0.466308\nstep 5910,loss:0.458923\nstep 5920,loss:0.465595\nstep 5930,loss:0.473561\nstep 5940,loss:0.456458\nstep 5950,loss:0.457062\nstep 5960,loss:0.466479\nstep 5970,loss:0.457039\nstep 5980,loss:0.493772\nstep 5990,loss:0.476743\nstep 6000,loss:0.460869\n距离训练完一整轮剩余时间： 261.8399872040997  分钟\nper_train_loss_avg: 0.4652705455422401 **********\nvaild_loss: 0.4689842\nstep 6010,loss:0.466254\nstep 6020,loss:0.46503\nstep 6030,loss:0.473422\nstep 6040,loss:0.453522\nstep 6050,loss:0.466395\nstep 6060,loss:0.479748\nstep 6070,loss:0.465742\nstep 6080,loss:0.453819\nstep 6090,loss:0.461479\nstep 6100,loss:0.469898\nstep 6110,loss:0.464695\nstep 6120,loss:0.486224\nstep 6130,loss:0.476903\nstep 6140,loss:0.475701\nstep 6150,loss:0.524369\nstep 6160,loss:0.470808\nstep 6170,loss:0.456887\nstep 6180,loss:0.450372\nstep 6190,loss:0.462909\nstep 6200,loss:0.440162\nstep 6210,loss:0.507125\nstep 6220,loss:0.456177\nstep 6230,loss:0.473141\nstep 6240,loss:0.470093\nstep 
# Training loop for the query/title matching model (variant feeding model.tst).
#
# Relies on globals from earlier cells: batches, vaild_data, sess, model,
# train_op, settings, get_w2v_array, tf, np, pd.
#
# Fixes vs. the original cell:
#  * Saver is built ONCE outside the loop — constructing a fresh
#    tf.train.Saver at every checkpoint resets its rotation state, so
#    max_to_keep=3 never pruned old checkpoints; also `saver = saver.save(...)`
#    clobbered the Saver with the returned checkpoint-path string.
#  * Accumulator lists are initialized here, so the cell survives
#    Restart & Run All instead of relying on variables leaked from other cells.
#  * The 5000-step average guards against an empty validation-loss list
#    (possible when resuming from a checkpointed step that is ≡ 0 mod 5000).
from time import time

TOTAL_SAMPLES = 50000000  # samples in one full training epoch

parts_time = []
per_train_loss_avg_list = []
per_vaild_loss_avg_list = []

saver = tf.train.Saver(max_to_keep=3)

for batch in batches:
    time1 = time()
    batch_x, batch_y = zip(*batch)
    batch_y = np.array(batch_y)[:, None]  # shape (batch, 1)
    batch_x = pd.DataFrame([row.tolist() for row in batch_x])
    # Columns 0/1 hold space-joined token strings: query and title.
    batch_x[[0, 1]] = batch_x[[0, 1]].applymap(lambda s: s.split(' '))
    batch_x = np.array(batch_x)
    # -> [batch, query_len/title_len, embedding_size]
    batch_x_query = np.apply_along_axis(
        lambda x: get_w2v_array(x[0], settings.query_len), axis=1, arr=batch_x)
    batch_x_title = np.apply_along_axis(
        lambda x: get_w2v_array(x[1], settings.title_len), axis=1, arr=batch_x)

    feed_dict = {model.embedded_x_query: batch_x_query,
                 model.embedded_x_title: batch_x_title,
                 model.y_inputs: batch_y,
                 model.batch_size: batch_x.shape[0],
                 model.tst: False,
                 model.keep_prob: 0.8}

    _, step, loss_out = sess.run([train_op, model.global_step, model.loss], feed_dict)
    parts_time.append(time() - time1)

    if step % 10 == 0:
        print("step {},loss:{:g}".format(step, loss_out))
    per_train_loss_avg_list.append(loss_out)

    if step % (int(TOTAL_SAMPLES / 1004)) == 0:
        print("batch ended")

    if step % 500 == 0:
        # ETA for the rest of the epoch, extrapolated from the last 500 steps.
        time_single_part = sum(parts_time)
        ed_time = (TOTAL_SAMPLES / settings.train_batch_size - step) / 500 * time_single_part
        print("距离训练完一整轮剩余时间：", ed_time / 60, " 分钟")
        parts_time = []

        # Evaluate one validation batch.
        vaild_x, vaild_y = zip(*(vaild_data.__next__()))
        vaild_y = np.array(vaild_y)[:, None]
        vaild_x = pd.DataFrame([row.tolist() for row in vaild_x])
        vaild_x[[0, 1]] = vaild_x[[0, 1]].applymap(lambda s: s.split(' '))
        vaild_x = np.array(vaild_x)
        vaild_x_query = np.apply_along_axis(
            lambda x: get_w2v_array(x[0], settings.query_len), axis=1, arr=vaild_x)
        vaild_x_title = np.apply_along_axis(
            lambda x: get_w2v_array(x[1], settings.title_len), axis=1, arr=vaild_x)

        feed_dict = {model.embedded_x_query: vaild_x_query,
                     model.embedded_x_title: vaild_x_title,
                     model.y_inputs: vaild_y,
                     model.batch_size: vaild_x.shape[0],
                     # NOTE(review): tst is False here exactly as in training.
                     # If tst means "test/inference mode" it should likely be
                     # True for validation — confirm against the model class
                     # (a later cell's variant feeds is_training: False here).
                     model.tst: False,
                     model.keep_prob: 1}

        step, loss_out = sess.run([model.global_step, model.loss], feed_dict)
        per_vaild_loss_avg_list.append(loss_out)
        per_train_loss_avg = sum(per_train_loss_avg_list) / len(per_train_loss_avg_list)
        print("per_train_loss_avg:", per_train_loss_avg, "**********")
        per_train_loss_avg_list = []
        print("valid_loss:", loss_out)

    if step % 5000 == 0 and per_vaild_loss_avg_list:
        per_vaild_loss_avg = sum(per_vaild_loss_avg_list) / len(per_vaild_loss_avg_list)
        per_vaild_loss_avg_list = []
        print("per_valid_loss_avg:", per_vaild_loss_avg, "************")
        saver.save(sess, "/home/kesci/work/nn/tf_model_text_cnn_diin/rnn", global_step=step)
3560,loss:0.436238\nstep 3570,loss:0.4562\nstep 3580,loss:0.481882\nstep 3590,loss:0.434021\nstep 3600,loss:0.464806\nstep 3610,loss:0.475996\nstep 3620,loss:0.478425\nstep 3630,loss:0.595179\nstep 3640,loss:0.46057\nstep 3650,loss:0.436509\nstep 3660,loss:0.48458\nstep 3670,loss:0.459547\nstep 3680,loss:0.468498\nstep 3690,loss:0.45243\nstep 3700,loss:0.43443\nstep 3710,loss:0.473463\nstep 3720,loss:0.467172\nstep 3730,loss:0.45866\nstep 3740,loss:0.477507\nstep 3750,loss:0.444473\nstep 3760,loss:0.421405\nstep 3770,loss:0.476464\nstep 3780,loss:0.502113\nstep 3790,loss:0.475296\nstep 3800,loss:0.461668\nstep 3810,loss:0.443936\nstep 3820,loss:0.465145\nstep 3830,loss:0.47512\nstep 3840,loss:0.470522\nstep 3850,loss:0.463475\nstep 3860,loss:0.443318\nstep 3870,loss:0.489175\nstep 3880,loss:0.449162\nstep 3890,loss:0.463322\nstep 3900,loss:0.474481\nstep 3910,loss:0.464143\nstep 3920,loss:0.482265\nstep 3930,loss:0.450919\nstep 3940,loss:0.471468\nstep 3950,loss:0.470618\nstep 3960,loss:0.460965\nstep 3970,loss:0.496844\nstep 3980,loss:0.496835\nstep 3990,loss:0.465293\nstep 4000,loss:0.461935\n距离训练完一整轮剩余时间： 639.9746258293588  分钟\nper_train_loss_avg: 0.46323683965206147 **********\nvaild_loss: 0.4697268\nstep 4010,loss:0.45999\nstep 4020,loss:0.479045\nstep 4030,loss:0.469251\nstep 4040,loss:0.441914\nstep 4050,loss:0.47959\nstep 4060,loss:0.468091\nstep 4070,loss:0.42655\nstep 4080,loss:0.468559\nstep 4090,loss:0.469526\nstep 4100,loss:0.476396\nstep 4110,loss:0.451558\nstep 4120,loss:0.45006\nstep 4130,loss:0.468637\nstep 4140,loss:0.452383\nstep 4150,loss:0.479956\nstep 4160,loss:0.478406\nstep 4170,loss:0.464664\nstep 4180,loss:0.473466\nstep 4190,loss:0.44282\nstep 4200,loss:0.480977\nstep 4210,loss:0.455953\nstep 4220,loss:0.455149\nstep 4230,loss:0.470252\nstep 4240,loss:0.457954\nstep 4250,loss:0.466244\nstep 4260,loss:0.476906\nstep 4270,loss:0.462789\nstep 4280,loss:0.429452\nstep 4290,loss:0.45625\nstep 4300,loss:0.444178\nstep 4310,loss:0.455992\nstep 
# Training loop for the query/title matching model (variant feeding
# model.is_training; checkpoints under first_zzp/).
#
# Relies on globals from earlier cells: batches, vaild_data, sess, model,
# train_op, settings, get_w2v_array, tf, np, pd.
#
# Fixes vs. the original cell:
#  * Saver is built ONCE outside the loop — a fresh tf.train.Saver per
#    checkpoint resets its rotation state, so max_to_keep=3 never pruned old
#    checkpoints, and `saver = saver.save(...)` clobbered the Saver with the
#    returned path string.
#  * per_train_loss_avg_list / per_vaild_loss_avg_list are initialized here
#    (the original only initialized parts_time), so the cell survives
#    Restart & Run All.
#  * The 5000-step average guards against an empty validation-loss list.
#
# TODO(review): the captured output shows the loss exploding to ~1e9 around
# step 4840 — consider adding gradient clipping where train_op is built.
from time import time

TOTAL_SAMPLES = 50000000  # samples in one full training epoch

parts_time = []
per_train_loss_avg_list = []
per_vaild_loss_avg_list = []

saver = tf.train.Saver(max_to_keep=3, var_list=tf.global_variables())

for batch in batches:
    time1 = time()
    batch_x, batch_y = zip(*batch)
    batch_y = np.array(batch_y)[:, None]  # shape (batch, 1)
    batch_x = pd.DataFrame([row.tolist() for row in batch_x])
    # Columns 0/1 hold space-joined token strings: query and title.
    batch_x[[0, 1]] = batch_x[[0, 1]].applymap(lambda s: s.split(' '))
    batch_x = np.array(batch_x)
    # -> [batch, query_len/title_len, embedding_size]
    batch_x_query = np.apply_along_axis(
        lambda x: get_w2v_array(x[0], settings.query_len), axis=1, arr=batch_x)
    batch_x_title = np.apply_along_axis(
        lambda x: get_w2v_array(x[1], settings.title_len), axis=1, arr=batch_x)

    feed_dict = {model.embedded_x_query: batch_x_query,
                 model.embedded_x_title: batch_x_title,
                 model.y_inputs: batch_y,
                 model.batch_size: batch_x.shape[0],
                 model.is_training: True,
                 model.keep_prob: 0.8}

    _, step, loss_out = sess.run([train_op, model.global_step, model.loss], feed_dict)
    parts_time.append(time() - time1)

    if step % 10 == 0:
        print("step {},loss:{:g}".format(step, loss_out))
    per_train_loss_avg_list.append(loss_out)

    if step % (int(TOTAL_SAMPLES / 1004)) == 0:
        print("batch ended")

    if step % 500 == 0:
        # ETA for the rest of the epoch, extrapolated from the last 500 steps.
        time_single_part = sum(parts_time)
        ed_time = (TOTAL_SAMPLES / settings.train_batch_size - step) / 500 * time_single_part
        print("距离训练完一整轮剩余时间：", ed_time / 60, " 分钟")
        parts_time = []

        # Evaluate one validation batch in inference mode (no dropout).
        vaild_x, vaild_y = zip(*(vaild_data.__next__()))
        vaild_y = np.array(vaild_y)[:, None]
        vaild_x = pd.DataFrame([row.tolist() for row in vaild_x])
        vaild_x[[0, 1]] = vaild_x[[0, 1]].applymap(lambda s: s.split(' '))
        vaild_x = np.array(vaild_x)
        vaild_x_query = np.apply_along_axis(
            lambda x: get_w2v_array(x[0], settings.query_len), axis=1, arr=vaild_x)
        vaild_x_title = np.apply_along_axis(
            lambda x: get_w2v_array(x[1], settings.title_len), axis=1, arr=vaild_x)

        feed_dict = {model.embedded_x_query: vaild_x_query,
                     model.embedded_x_title: vaild_x_title,
                     model.y_inputs: vaild_y,
                     model.batch_size: vaild_x.shape[0],
                     model.is_training: False,
                     model.keep_prob: 1}

        step, loss_out = sess.run([model.global_step, model.loss], feed_dict)
        per_vaild_loss_avg_list.append(loss_out)
        per_train_loss_avg = sum(per_train_loss_avg_list) / len(per_train_loss_avg_list)
        print("per_train_loss_avg:", per_train_loss_avg, "**********")
        per_train_loss_avg_list = []
        print("valid_loss:", loss_out)

    if step % 5000 == 0 and per_vaild_loss_avg_list:
        per_vaild_loss_avg = sum(per_vaild_loss_avg_list) / len(per_vaild_loss_avg_list)
        per_vaild_loss_avg_list = []
        print("per_valid_loss_avg:", per_vaild_loss_avg, "************")
        saver.save(sess, "/home/kesci/work/first_zzp/tf_model_text_cnn_diin/rnn", global_step=step)