{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.6/importlib/_bootstrap.py:205: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n",
      "  return f(*args, **kwds)\n",
      "/usr/local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "%reload_ext autoreload \n",
    "%autoreload 2\n",
    "import os\n",
    "import sys\n",
    "\n",
    "project_basedir = '..'\n",
    "sys.path.append(project_basedir)\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "from matplotlib import pyplot as plt\n",
    "import random \n",
    "import time\n",
    "from common.utils import Dataset,ProgressBar\n",
    "from tflearn.data_flow import DataFlow,DataFlowStatus,FeedDictFlow\n",
    "from tflearn.data_utils import Preloader,ImagePreloader\n",
    "import scipy\n",
    "import pandas as pd\n",
    "import xmltodict\n",
    "import common\n",
    "import tflearn\n",
    "import copy\n",
    "from cchess import *\n",
    "from gameplays.game_convert import convert_game,convert_game_value,convert_game_board,is_game_valid\n",
    "import os\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = ''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Tue Aug 14 22:36:16 2018       \r\n",
      "+-----------------------------------------------------------------------------+\r\n",
      "| NVIDIA-SMI 384.111                Driver Version: 384.111                   |\r\n",
      "|-------------------------------+----------------------+----------------------+\r\n",
      "| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\r\n",
      "| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\r\n",
      "|===============================+======================+======================|\r\n",
      "|   0  GeForce GTX 108...  Off  | 00000000:05:00.0 Off |                  N/A |\r\n",
      "| 49%   82C    P2   129W / 250W |   9034MiB / 11172MiB |     32%      Default |\r\n",
      "+-------------------------------+----------------------+----------------------+\r\n",
      "|   1  GeForce GTX 108...  Off  | 00000000:42:00.0 Off |                  N/A |\r\n",
      "| 49%   82C    P2   116W / 250W |   9034MiB / 11172MiB |     24%      Default |\r\n",
      "+-------------------------------+----------------------+----------------------+\r\n",
      "                                                                               \r\n",
      "+-----------------------------------------------------------------------------+\r\n"
     ]
    }
   ],
   "source": [
    "!nvidia-smi | head -n 15"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# a network predict select and move of Chinese chess, with minimal preprocessing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Training configuration constants.\n",
    "GPU_CORE = [0]  # GPU indices used for tower placement when building the graph\n",
    "BATCH_SIZE = 512\n",
    "BEGINING_LR = 0.01  # initial learning rate (sic: 'BEGINING')\n",
    "#TESTIMG_WIDTH = 500\n",
    "model_name = 'supervisord_model'\n",
    "data_dir = '../data/imsa-cbf/'  # destination directory for converted .cbf games"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# All UCI-style move labels for Chinese chess; label2ind maps each\n",
    "# label string to its index in the policy output vector.\n",
    "labels = common.board.create_uci_labels()\n",
    "label2ind = dict(zip(labels,list(range(len(labels)))))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Map pgn filename -> game result; values are 1/0/-1 (per the\n",
    "# {1:'w', 0:'peace', -1:'b'} suffix mapping used later in this notebook).\n",
    "pgn2value = dict(pd.read_csv('../data/resultlist.csv').values[:,1:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['10001.pgn', '10002.pgn', '10003.pgn']"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "list(pgn2value.keys())[:3]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Downsample games with result == 1 to balance the result distribution:\n",
    "# pick a random subset of their filenames for removal.\n",
    "killfile = [i[0] for i in [i for i in list(pgn2value.items()) if i[1] == 1]]\n",
    "np.random.shuffle(killfile)\n",
    "# NOTE(review): 20572 - 15286 = 5286; presumably the surplus of result==1\n",
    "# games over another class, counted elsewhere — confirm. No RNG seed is\n",
    "# set, so the removed subset is not reproducible across runs.\n",
    "killfile = killfile[:20572 - 15286 ]\n",
    "#killfile = killfile[:20000]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Drop the sampled files from the result lookup so they are excluded\n",
    "# from the converted dataset (raises KeyError if a name is missing,\n",
    "# same as `del`).\n",
    "for filename in killfile:\n",
    "    pgn2value.pop(filename)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "??convert_game_value"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# NOTE(review): `filelist` is consumed here before any visible cell\n",
    "# defines it — this notebook was executed out of order (see the\n",
    "# non-sequential execution counts). Prefix each path with the sibling\n",
    "# icyElephant checkout.\n",
    "filelist = [os.path.join('../../icyElephant/',i) for i in filelist]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'../../icyElephant/./data/imsa-cbf/2015年第二届淄博傅山杯象棋公开赛 {DD7D1558-A28F-4B6A-B435-230CC1C50C93}.cbf'"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "filelist[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def convert_game_value(onefile,feature_list,pgn2value):\n",
    "    \"\"\"Parse a .cbf (XML) game record and look up its game result.\n",
    "\n",
    "    NOTE(review): this redefines the convert_game_value imported from\n",
    "    gameplays.game_convert in the first cell, shadowing it for the rest\n",
    "    of the kernel session.\n",
    "\n",
    "    Args:\n",
    "        onefile: path to a .cbf file (XML, UTF-8 encoded).\n",
    "        feature_list: unused here; kept for call-site compatibility.\n",
    "        pgn2value: dict mapping pgn filename -> result, or None.\n",
    "\n",
    "    Returns:\n",
    "        The value from pgn2value for this game (may itself be None or\n",
    "        NaN), or None on any parse error or when pgn2value is None.\n",
    "    \"\"\"\n",
    "    try:\n",
    "        doc = xmltodict.parse(open(onefile,encoding='utf-8').read())\n",
    "        fen = doc['ChineseChessRecord'][\"Head\"][\"FEN\"]\n",
    "        if pgn2value is not None:\n",
    "            pgnfile = doc['ChineseChessRecord'][\"Head\"][\"From\"]\n",
    "        #moves = [i[\"@value\"] for i in  doc['ChineseChessRecord']['MoveList'][\"Move\"] if i[\"@value\"] != '00-00']\n",
    "        # bb is constructed only as a FEN sanity check; its value is unused.\n",
    "        bb = BaseChessBoard(fen)\n",
    "        if pgn2value is not None:      \n",
    "            val = pgn2value.get(pgnfile)\n",
    "            return val\n",
    "    # NOTE(review): bare except maps every failure (bad XML, missing keys,\n",
    "    # I/O errors) to None; callers count these as `nonenum`.\n",
    "    except:\n",
    "        return None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import shutil"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mkdir: 无法创建目录\"../data/imsa-cbf\": 文件已存在\r\n"
     ]
    }
   ],
   "source": [
    "!mkdir ../data/imsa-cbf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "nonenum:5115 100.00 % [==================================================>] 49982/49982 \t used:126s eta:0 s"
     ]
    }
   ],
   "source": [
    "# Copy every game with a known result into ../data/imsa-cbf/, encoding\n",
    "# the result in the filename suffix (_w / _peace / _b); games whose\n",
    "# result cannot be determined are counted in nonenum and skipped.\n",
    "pb = ProgressBar(worksum=len(filelist))\n",
    "pb.startjob()\n",
    "nonenum = 0  # games with missing/NaN result\n",
    "for i in filelist:\n",
    "    fname = i.split('/')[-1][:-4]  # basename without the '.cbf' extension\n",
    "    pb.info = \"nonenum:{}\".format(nonenum)\n",
    "    pb.complete(1)\n",
    "    val = convert_game_value(i,feature_list,pgn2value)\n",
    "    # short-circuit keeps np.isnan from being called on None\n",
    "    if val is None or np.isnan(val) :\n",
    "        nonenum += 1\n",
    "    if val is not None and not np.isnan(val):\n",
    "        val = int(val)\n",
    "        assert(val in [0,1,-1])\n",
    "        flag = {1:'w',0:'peace',-1:'b'}  # result -> filename suffix\n",
    "        shutil.copyfile(os.path.join('../../icyElephant/./data/imsa-cbf/{}.cbf'.format(fname)),\\\n",
    "                                    os.path.join('../data/imsa-cbf/{}_{}.cbf'.format(fname,flag[val])))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "np.isnan(val)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['10001.pgn', '10002.pgn', '10003.pgn']"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "list(pgn2value.keys())[:3]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2015年第二届淄博傅山杯象棋公开赛 {DD7D1558-A28F-4B6A-B435-230CC1C50C93}_b.cbf\r\n",
      "2015年第二届淄博傅山杯象棋公开赛 {DD7D1558-A28F-4B6A-B435-230CC1C50C93}_peace.cbf\r\n",
      "2015年第二届淄博傅山杯象棋公开赛 {DD7D1558-A28F-4B6A-B435-230CC1C50C93}_w.cbf\r\n"
     ]
    }
   ],
   "source": [
    "!ls ../data/imsa-cbf/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Mirror-lookup tables: rev_ab maps each file letter a..i to its\n",
    "# horizontal mirror (used for left-right board augmentation below);\n",
    "# rev_num maps each digit 0..9 to its reverse (not used in the visible\n",
    "# cells — presumably for a vertical flip).\n",
    "rev_ab = {a: b for a, b in zip('abcdefghi', reversed('abcdefghi'))}\n",
    "rev_num = {a: b for a, b in zip('0123456789', reversed('0123456789'))}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "../../icyElephant/./data/imsa-cbf/2015年第二届淄博傅山杯象棋公开赛 {DD7D1558-A28F-4B6A-B435-230CC1C50C93}.cbf\n",
      "../../icyElephant/./data/imsa-cbf/2015年腾讯棋牌全国象棋甲级联赛 {2175680E-9C33-4222-84BF-8BF84FEBBAF2}.cbf\n",
      "../../icyElephant/./data/imsa-cbf/no {BEAF4864-1754-43D2-A241-43552C46B930}.cbf\n"
     ]
    }
   ],
   "source": [
    "#content = pd.read_csv('../data/train_list.csv',header=None,index_col=None)\n",
    "#filelist = [i[0] for i in content.get_values()]\n",
    "# Smoke-test validity checking on the first three game files;\n",
    "# dat collects 1 for valid games and 0 for invalid ones.\n",
    "dat = []\n",
    "# Piece letters per side; upper case = red, lower case = black\n",
    "# (presumably A/B/C/K/N/P/R = advisor, elephant, cannon, king, knight,\n",
    "# pawn, rook — confirm against cchess).\n",
    "feature_list = {\"red\":['A', 'B', 'C', 'K', 'N', 'P', 'R']\n",
    "                             ,\"black\":['a', 'b', 'c', 'k', 'n', 'p', 'r']}\n",
    "for i in filelist[:3]:\n",
    "    print(i)\n",
    "    one = is_game_valid(i,feature_list,pgn2value)\n",
    "    if one == False:\n",
    "        dat.append(0)\n",
    "    else:\n",
    "        dat.append(1)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "!mkdir imsa-cbf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "相约廊桥全国象棋棋后赛决五六名快棋_b.cbf\r\n",
      "相约廊桥象棋女子棋后赛_w.cbf\r\n",
      "相约廊桥象棋棋后赛 {1D0DA60D-AAAD-4416-8BB3-A0A2E8EF9001}_peace.cbf\r\n",
      "相约廊桥象棋棋后赛 {29DCED9F-09EB-4A19-B54B-F7D4A0050570}_peace.cbf\r\n",
      "相约廊桥象棋棋后赛 {47C6A0EE-9074-47F2-80D2-5B4A9D24E917}_peace.cbf\r\n",
      "相约廊桥象棋棋后赛 {56555A8C-FAD7-4E55-BB9E-0F09E8A735DE}_w.cbf\r\n",
      "相约廊桥象棋棋后赛 {C1A9D6B1-8EA4-42BA-BA88-05075922E8E6}_peace.cbf\r\n",
      "相约廊桥象棋棋后赛 {C9ACF2C5-DEF5-4889-9E50-085AE0B6151C}_peace.cbf\r\n",
      "相约廊桥象棋棋后赛_peace.cbf\r\n",
      "伊万塞蒂亚万 {1C97AA3B-7038-4670-8828-97326031905E}_w.cbf\r\n"
     ]
    }
   ],
   "source": [
    "!ls ../data/imsa-cbf/ | tail"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13674\r\n"
     ]
    }
   ],
   "source": [
    "!ls ../data/imsa-cbf/ | grep _w.cbf | wc -l"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 89,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "13676\r\n"
     ]
    }
   ],
   "source": [
    "!ls ../data/imsa-cbf/ | grep _b.cbf | wc -l"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 90,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "17519\r\n"
     ]
    }
   ],
   "source": [
    "!ls ../data/imsa-cbf/ | grep _peace.cbf | wc -l"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "﻿<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r",
      "\r\n",
      "<ChineseChessRecord Version=\"1.0\">\r",
      "\r\n",
      " <Head>\r",
      "\r\n",
      "  <Name>相约廊桥全国象棋棋后赛决五六名快棋</Name>\r",
      "\r\n",
      "  <URL />\r",
      "\r\n",
      "  <From>689139.pgn</From>\r",
      "\r\n",
      "  <ContestType />\r",
      "\r\n",
      "  <Contest />\r",
      "\r\n",
      "  <Round />\r",
      "\r\n",
      "  <Group />\r",
      "\r\n",
      "  <Table />\r",
      "\r\n",
      "  <Date>2016-11-20 09:47:00</Date>\r",
      "\r\n",
      "  <Site />\r",
      "\r\n",
      "  <TimeRule />\r",
      "\r\n",
      "  <Red>刘欢</Red>\r",
      "\r\n",
      "  <RedTeam />\r",
      "\r\n",
      "  <RedTime />\r",
      "\r\n",
      "  <RedRating />\r",
      "\r\n",
      "  <Black>左文静</Black>\r",
      "\r\n",
      "  <BlackTeam />\r",
      "\r\n",
      "  <BlackTime />\r",
      "\r\n",
      "  <BlackRating />\r",
      "\r\n",
      "  <Referee />\r",
      "\r\n",
      "  <Recorder />\r",
      "\r\n",
      "  <Commentator />\r",
      "\r\n",
      "  <CommentatorURL />\r",
      "\r\n",
      "  <Creator />\r",
      "\r\n",
      "  <CreatorURL />\r",
      "\r\n",
      "  <DateCreated />\r",
      "\r\n",
      "  <DateModified>2017-11-04 18:29:28</DateModified>\r",
      "\r\n",
      "  <ECCO>C51</ECCO>\r",
      "\r\n",
      "  <RecordType>1</RecordType>\r",
      "\r\n",
      "  <RecordKind />\r",
      "\r\n",
      "  <RecordResult>2</RecordResult>\r",
      "\r\n",
      "  <ResultType />\r",
      "\r\n",
      "  <FEN>rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR w - - 0 1</FEN>\r",
      "\r\n",
      " </Head>\r",
      "\r\n",
      " <MoveList>\r",
      "\r\n",
      "  <Move value=\"00-00\" />\r",
      "\r\n",
      "  <Move value=\"77-47\" />\r",
      "\r\n",
      "  <Move value=\"70-62\" />\r",
      "\r\n",
      "  <Move value=\"79-67\" />\r",
      "\r\n",
      "  <Move value=\"63-64\" />\r",
      "\r\n",
      "  <Move value=\"89-79\" />\r",
      "\r\n",
      "  <Move value=\"80-70\" />\r",
      "\r\n",
      "  <Move value=\"17-37\" />\r",
      "\r\n",
      "  <Move value=\"23-24\" />\r",
      "\r\n",
      "  <Move value=\"19-07\" />\r",
      "\r\n",
      "  <Move value=\"10-22\" />\r",
      "\r\n",
      "  <Move value=\"09-19\" end=\"1\" />\r",
      "\r\n",
      " </MoveList>\r",
      "\r\n",
      "</ChineseChessRecord>\r",
      "\r\n"
     ]
    }
   ],
   "source": [
    "! cat '../data/imsa-cbf/相约廊桥全国象棋棋后赛决五六名快棋_b.cbf'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'[Game \"Chinese Chess\"]\\n\\n[Event \"相约廊桥全国象棋棋后赛决五六名快棋\"]\\n\\n[Site \"\"]\\n\\n[Seesion \"1\"]\\n\\n[Date \"2016-11-20 09:47:00\"]\\n\\n[Round \"\"]\\n\\n[Red \"刘欢\"]\\n\\n[RedTeam \"\"]\\n\\n[Black \"左文静\"]\\n\\n[BlackTeam \"\"]\\n\\n[Result \"0-1\"]\\n\\n1.炮二平五 马8进7\\n\\n2.馬二进三 卒7进1\\n\\n3.車一平二 车9平8\\n\\n4.炮八平六 卒3进1\\n\\n5.馬八进九 马2进3\\n\\n6.車九平八 砲2平1\\n\\n7.車二进六 象7进5\\n\\n8.車八进七 砲8平9\\n\\n9.車二平三 马7退5\\n\\n10.兵五进一 马3进4\\n\\n11.車八进一 马4进6\\n\\n12.車三平四 马6进5\\n\\n13.相七进五 马5进3\\n\\n14.車四平一 砲9平6\\n\\n15.馬三进五 砲1进4\\n\\n16.兵七进一 车8进6\\n\\n17.兵七进一 车8平7\\n\\n18.馬五进七 象5进3\\n\\n19.兵五进一 车1平2\\n\\n20.車八平七 象3进5\\n\\n21.兵五进一 车2进3\\n\\n22.兵五平六 马3进4\\n\\n23.馬七进五 士4进5\\n\\n24.車七退三 砲6进2\\n\\n25.馬五进四 士5进6\\n\\n26.車七平六 砲6进4\\n\\n27.車一平五 士6进5\\n\\n28.車五进一 砲1平5\\n\\n29.仕六进五 车2进5\\n\\n30.車六平七 车2平5\\n\\n31.帥五平六 车5进1\\n\\n32.帥六进一 将5平6\\n\\n33.炮六进二 砲6退3\\n\\n34.車七进四 将6进1\\n\\n35.車五退一 车7进2\\n\\n36.帥六进一 砲6进2'"
      ]
     },
     "execution_count": 87,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "open('../../icyElephant/data/imsa_play/689139.pgn',encoding='gbk').read()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Counter(dat)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class ElePreloader(object):\n",
    "    \"\"\"Batch generator over .cbf game files for tflearn's FeedDictFlow.\n",
    "\n",
    "    Each __getitem__ call returns one batch of (board planes, one-hot\n",
    "    move labels, game values) produced by an internal infinite generator\n",
    "    that round-robins over batch_size concurrently-open games.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,datafile,batch_size=64):\n",
    "        # datafile: CSV with one game-file path per row (no header).\n",
    "        self.batch_size=batch_size\n",
    "        content = pd.read_csv(datafile,header=None,index_col=None)\n",
    "        self.filelist = [i[0] for i in content.get_values()]\n",
    "        self.pos = 0  # NOTE(review): never read in the visible code\n",
    "        self.feature_list = {\"red\":['A', 'B', 'C', 'K', 'N', 'P', 'R']\n",
    "                             ,\"black\":['a', 'b', 'c', 'k', 'n', 'p', 'r']}\n",
    "        # NOTE(review): batch_size was already assigned above.\n",
    "        self.batch_size = batch_size\n",
    "        # Safe to create before game_iterlist exists: the generator body\n",
    "        # does not run until the first __next__ call.\n",
    "        self.batch_iter = self.__iter()\n",
    "        assert(len(self.filelist) > batch_size)\n",
    "        # One slot per file, but only the first batch_size slots are used.\n",
    "        self.game_iterlist = [None for i in self.filelist]\n",
    "    \n",
    "    def __iter(self):\n",
    "        \"\"\"Infinite generator yielding (x, y, value) training batches.\"\"\"\n",
    "        # NOTE(review): retx2/rety2 are never used.\n",
    "        retx1,rety1,retx2,rety2 = [],[],[],[]\n",
    "        vals = []\n",
    "        filelist = []\n",
    "        while True:\n",
    "            for i in range(self.batch_size):\n",
    "                # Refill an exhausted lane with a fresh, validated game.\n",
    "                # NOTE(review): prefer `is None` over `== None`.\n",
    "                if self.game_iterlist[i] == None:\n",
    "                    if len(filelist) == 0:\n",
    "                        # Reshuffle a working copy once it runs dry.\n",
    "                        filelist = copy.copy(self.filelist)\n",
    "                        random.shuffle(filelist)\n",
    "                    while True:\n",
    "                        onefile = filelist.pop()\n",
    "                        # NOTE(review): relies on a convert_game_value that\n",
    "                        # returns a per-position iterator; another cell in\n",
    "                        # this notebook redefines it to return a scalar —\n",
    "                        # whichever ran last in the kernel wins.\n",
    "                        oneitem = convert_game_value(onefile,self.feature_list,pgn2value)\n",
    "                        try:\n",
    "                            game_valid = is_game_valid(onefile,self.feature_list,pgn2value)\n",
    "                        except:\n",
    "                            game_valid = False\n",
    "                        #print(oneitem,onefile)\n",
    "                        #if game_valid == False:\n",
    "                        #    print(onefile,'gg')\n",
    "                        #else:\n",
    "                        #    print(onefile,'not gg')\n",
    "                        if game_valid == True:\n",
    "                            self.game_iterlist[i] = oneitem\n",
    "                            break\n",
    "                        #print('hehe')\n",
    "                game_iter = self.game_iterlist[i]\n",
    "                \n",
    "                try:\n",
    "                    x1,y1,val1 = game_iter.__next__()\n",
    "                    # To (height, width, planes), then add a batch axis.\n",
    "                    x1 = np.transpose(x1,[1,2,0])\n",
    "                    x1 = np.expand_dims(x1,axis=0)\n",
    "                    \n",
    "                    # Random left-right augmentation: mirror the board's\n",
    "                    # width axis and the move's file letters (ranks kept).\n",
    "                    if random.random() < 0.5:\n",
    "                        y1 = [rev_ab[y1[0]],y1[1],rev_ab[y1[2]],y1[3]]\n",
    "                        x1 = x1[:,:,::-1,:]\n",
    "                        #x1 = np.concatenate((x1[:,::-1,:,7:],x1[:,::-1,:,:7]),axis=-1)\n",
    "                    retx1.append(x1)\n",
    "                    #rety1.append(y1)\n",
    "                    oney = np.zeros(len(labels))\n",
    "                    oney[label2ind[''.join(y1)]] = 1\n",
    "                    rety1.append(oney)\n",
    "                    vals.append(val1)\n",
    "\n",
    "                    if len(retx1) >= self.batch_size:\n",
    "                        yield (np.concatenate(retx1,axis=0),np.asarray(rety1),np.asarray(vals))\n",
    "                        retx1,rety1 = [],[]\n",
    "                        vals = []\n",
    "                # NOTE(review): bare except treats any failure — including\n",
    "                # normal game exhaustion via StopIteration — as end-of-game\n",
    "                # and frees the lane, silently hiding real errors too.\n",
    "                except :\n",
    "                    self.game_iterlist[i] = None\n",
    "\n",
    "    def __getitem__(self, id):\n",
    "        \"\"\"Return the next batch; `id` is ignored (data is a stream).\"\"\"\n",
    "        \n",
    "        x1,y1,val1 = self.batch_iter.__next__()\n",
    "        return x1,y1,val1\n",
    "        \n",
    "    def __len__(self):\n",
    "        \"\"\"Nominal epoch length for FeedDictFlow; not the true data size.\"\"\"\n",
    "        return 10000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Wrap the training preloader in a threaded FeedDictFlow so batches are\n",
    "# produced in the background; trainflow.next()['data'] yields one batch.\n",
    "trainset = ElePreloader(datafile='data/train_list.csv',batch_size=BATCH_SIZE)\n",
    "with tf.device(\"/gpu:{}\".format(GPU_CORE[0])):\n",
    "    coord = tf.train.Coordinator()\n",
    "    trainflow = FeedDictFlow({\n",
    "            'data':trainset,\n",
    "        },coord,batch_size=BATCH_SIZE,shuffle=True,continuous=True,num_threads=1)\n",
    "trainflow.start()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Same background-batching setup as the training flow, over the test list.\n",
    "testset = ElePreloader(datafile='data/test_list.csv',batch_size=BATCH_SIZE)\n",
    "with tf.device(\"/gpu:{}\".format(GPU_CORE[0])):\n",
    "    coord = tf.train.Coordinator()\n",
    "    testflow = FeedDictFlow({\n",
    "            'data':testset,\n",
    "        },coord,batch_size=BATCH_SIZE,shuffle=True,continuous=True,num_threads=1)\n",
    "testflow.start()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "for i in range(100):\n",
    "    sample_x1,sample_y1,sample_value = trainflow.next()['data']\n",
    "    print(1,end=',')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Counter({-1.0: 152, -0.0: 211, 1.0: 148, nan: 1})"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Counter(sample_value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "sample_x1,sample_y1,sample_value = testflow.next()['data']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((512, 10, 9, 14), (512, 2086), (512,))"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sample_x1.shape,sample_y1.shape,sample_value.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'b2e2'"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "labels[np.argmax(sample_y1[0])]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1, 1, 1, 1, 1, 1, 1, 1, 1],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [0, 1, 0, 0, 0, 0, 0, 1, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1],\n",
       "       [0, 1, 0, 0, 0, 0, 0, 1, 0],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=uint64)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.sum(sample_x1[0],axis=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0., -1., -1.,  0.,  0.,  1.,  1.,  1.,  1.,  1.])"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sample_value[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2086"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(10, 9)"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.sum(sample_x1[0],axis=-1).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def res_block(inputx,name,training,block_num=2,filters=128,kernel_size=(3,3)):\n",
    "    \"\"\"Residual block: block_num conv+BN layers, with the input added\n",
    "    back (skip connection) before the final ELU.\n",
    "\n",
    "    NOTE(review): the skip add requires `filters` to equal inputx's\n",
    "    channel count; callers here always use 128 channels.\n",
    "    \"\"\"\n",
    "    net = inputx\n",
    "    for i in range(block_num):\n",
    "        net = tf.layers.conv2d(net,filters=filters,kernel_size=kernel_size,activation=None,name=\"{}_res_conv{}\".format(name,i),padding='same')\n",
    "        net = tf.layers.batch_normalization(net,training=training,name=\"{}_res_bn{}\".format(name,i))\n",
    "        if i == block_num - 1:\n",
    "            net = net + inputx #= tf.concat((inputx,net),axis=-1)\n",
    "        net = tf.nn.elu(net,name=\"{}_res_elu{}\".format(name,i))\n",
    "    return net\n",
    "\n",
    "def conv_block(inputx,name,training,block_num=1,filters=2,kernel_size=(1,1)):\n",
    "    \"\"\"Conv+BN+ELU stack, flattened and projected to a 10*9 dense head.\n",
    "\n",
    "    NOTE(review): unused in the visible graph — the call in\n",
    "    res_net_board is commented out.\n",
    "    \"\"\"\n",
    "    net = inputx\n",
    "    for i in range(block_num):\n",
    "        net = tf.layers.conv2d(net,filters=filters,kernel_size=kernel_size,activation=None,name=\"{}_convblock_conv{}\".format(name,i),padding='same')\n",
    "        net = tf.layers.batch_normalization(net,training=training,name=\"{}_convblock_bn{}\".format(name,i))\n",
    "        net = tf.nn.elu(net,name=\"{}_convblock_elu{}\".format(name,i))\n",
    "    # net [None,10,9,2]\n",
    "    netshape = net.get_shape().as_list()\n",
    "    print(\"inside conv block {}\".format(str(netshape)))\n",
    "    net = tf.reshape(net,shape=(-1,netshape[1] * netshape[2] * netshape[3]))\n",
    "    net = tf.layers.dense(net,10 * 9,name=\"{}_dense\".format(name))\n",
    "    net = tf.nn.elu(net,name=\"{}_elu\".format(name))\n",
    "    return net\n",
    "\n",
    "def res_net_board(inputx,name,training,filters=128):\n",
    "    \"\"\"Shared network body: an initial conv+BN+ELU followed by\n",
    "    NUM_RES_LAYERS residual blocks.\n",
    "\n",
    "    NOTE(review): reads the global NUM_RES_LAYERS, which is defined in a\n",
    "    later cell (set to 7 at graph-build time) — calling this before that\n",
    "    cell runs raises NameError.\n",
    "    \"\"\"\n",
    "    net = inputx\n",
    "    net = tf.layers.conv2d(net,filters=filters,kernel_size=(3,3),activation=None,name=\"{}_res_convb\".format(name),padding='same')\n",
    "    net = tf.layers.batch_normalization(net,training=training,name=\"{}_res_bnb\".format(name))\n",
    "    net = tf.nn.elu(net,name=\"{}_res_elub\".format(name))\n",
    "    for i in range(NUM_RES_LAYERS):\n",
    "        net = res_block(net,name=\"{}_layer_{}\".format(name,i + 1),training=training)\n",
    "        print(net.get_shape().as_list())\n",
    "    print(\"inside res net {}\".format(str(net.get_shape().as_list())))\n",
    "    #net_unsoftmax = conv_block(net,name=\"{}_conv\".format(name),training=training)\n",
    "    return net\n",
    "\n",
    "def get_scatter(name):\n",
    "    \"\"\"Create a scalar-summary placeholder/op pair under the Test scope;\n",
    "    returns (placeholder, summary_op) for feeding evaluation metrics.\"\"\"\n",
    "    with tf.variable_scope(\"Test\"):\n",
    "        ph = tf.placeholder(tf.float32,name=name)\n",
    "        op = tf.summary.scalar(name,ph)\n",
    "    return ph,op"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def average_gradients(tower_grads):\n",
    "    \"\"\"Calculate the average gradient for each shared variable across all towers.\n",
    "    Note that this function provides a synchronization point across all towers.\n",
    "    Args:\n",
    "    tower_grads: List of lists of (gradient, variable) tuples. The outer list\n",
    "      is over individual gradients. The inner list is over the gradient\n",
    "      calculation for each tower.\n",
    "    Returns:\n",
    "     List of pairs of (gradient, variable) where the gradient has been averaged\n",
    "     across all towers.\n",
    "\n",
    "\n",
    "    \"\"\"\n",
    "    average_grads = []\n",
    "    for grad_and_vars in zip(*tower_grads):\n",
    "        # Note that each grad_and_vars looks like the following:\n",
    "        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))\n",
    "        grads = []\n",
    "        for g, _ in grad_and_vars:\n",
    "            # Add 0 dimension to the gradients to represent the tower.\n",
    "            expanded_g = tf.expand_dims(g, 0)\n",
    "            # Append on a 'tower' dimension which we will average over below.\n",
    "            grads.append(expanded_g)\n",
    "\n",
    "        # Average over the 'tower' dimension.\n",
    "        grad = tf.concat(grads,0)\n",
    "        grad = tf.reduce_mean(grad, 0)\n",
    "\n",
    "        # Keep in mind that the Variables are redundant because they are shared\n",
    "        # across towers. So .. we will just return the first tower's pointer to\n",
    "        # the Variable.\n",
    "        v = grad_and_vars[0][1]\n",
    "        grad_and_var = (grad, v)\n",
    "        average_grads.append(grad_and_var)\n",
    "    return average_grads\n",
    "\n",
    "def add_grad_to_list(opt,train_param,loss,tower_grad):\n",
    "    \"\"\"Compute grads of loss w.r.t. train_param and append the (grad, var)\n",
    "    pairs for one tower to tower_grad.\n",
    "\n",
    "    NOTE(review): zip() is a lazy single-use iterator; this is fine here\n",
    "    because average_gradients consumes each entry exactly once.\n",
    "    \"\"\"\n",
    "    grads = opt.compute_gradients(loss, var_list = train_param)\n",
    "    grads = [i[0] for i in grads]\n",
    "    #print(grads)\n",
    "    tower_grad.append(zip(grads,train_param))\n",
    "    \n",
    "def get_op_mul(tower_gradients,optimizer,gs):\n",
    "    \"\"\"Average the per-tower gradients and build the apply-gradients op\n",
    "    (gs is the global step, incremented on apply).\"\"\"\n",
    "    grads = average_gradients(tower_gradients)\n",
    "    train_op = optimizer.apply_gradients(grads,gs)\n",
    "    return train_op\n",
    "\n",
    "def reduce_mean(x):\n",
    "    # Thin wrapper used when combining per-tower losses.\n",
    "    return tf.reduce_mean(x)\n",
    "\n",
    "def merge(x):\n",
    "    # Concatenate per-tower outputs along the batch axis.\n",
    "    return tf.concat(x,axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0]"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "GPU_CORE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "[None, 10, 9, 128]\n",
      "[None, 10, 9, 128]\n",
      "[None, 10, 9, 128]\n",
      "[None, 10, 9, 128]\n",
      "[None, 10, 9, 128]\n",
      "[None, 10, 9, 128]\n",
      "[None, 10, 9, 128]\n",
      "inside res net [None, 10, 9, 128]\n"
     ]
    }
   ],
   "source": [
    "tf.reset_default_graph()\n",
    "\n",
    "NUM_RES_LAYERS = 7\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "#with tf.device(\"/gpu:{}\".format(GPU_CORE)):\n",
    "    X = tf.placeholder(tf.float32,[None,10,9,14])\n",
    "    nextmove = tf.placeholder(tf.float32,[None,len(labels)])\n",
    "    score = tf.placeholder(tf.float32,[None,1])\n",
    "    \n",
    "    training = tf.placeholder(tf.bool,name='training_mode')\n",
    "    learning_rate = tf.placeholder(tf.float32)\n",
    "    global_step = tf.train.get_or_create_global_step()\n",
    "    optimizer_policy = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)\n",
    "    optimizer_value = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)\n",
    "    optimizer_multitarg = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)\n",
    "    \n",
    "    tower_gradients_policy,tower_gradients_value,tower_gradients_multitarg = [],[],[]\n",
    "    \n",
    "    net_softmax_collection = []\n",
    "    value_head_collection = []\n",
    "    multitarget_loss_collection = []\n",
    "    value_loss_collection = []\n",
    "    policy_loss_collection = []\n",
    "    accuracy_select_collection = []\n",
    "    with tf.variable_scope(tf.get_variable_scope()) as vscope:\n",
    "        for ind,one_core in enumerate(GPU_CORE):\n",
    "            with tf.device(\"/gpu:{}\".format(one_core)):\n",
    "                print(ind)\n",
    "                body = res_net_board(X[ind * (BATCH_SIZE // len(GPU_CORE)):(ind + 1) * (BATCH_SIZE // len(GPU_CORE))],\n",
    "                                     \"selectnet\",training=training)\n",
    "                with tf.variable_scope(\"policy_head\"):\n",
    "                    policy_head = tf.layers.conv2d(body, 2, 1, padding='SAME')\n",
    "                    policy_head = tf.contrib.layers.batch_norm(policy_head, center=False, epsilon=1e-5, fused=True,\n",
    "                                                                is_training=training, activation_fn=tf.nn.relu)\n",
    "\n",
    "                    # print(self.policy_head.shape)  # (?, 9, 10, 2)\n",
    "                    policy_head = tf.reshape(policy_head, [-1, 9 * 10 * 2])\n",
    "                    policy_head = tf.contrib.layers.fully_connected(policy_head, len(labels), activation_fn=None)\n",
    "                    #self.policy_head.append(policy_head)    # 保存多个gpu的策略头结果（走子概率向量）\n",
    "\n",
    "                # 价值头\n",
    "                with tf.variable_scope(\"value_head\"):\n",
    "                    value_head = tf.layers.conv2d(body, 1, 1, padding='SAME')\n",
    "                    value_head = tf.contrib.layers.batch_norm(value_head, center=False, epsilon=1e-5, fused=True,\n",
    "                                                    is_training=training, activation_fn=tf.nn.relu)\n",
    "                    # print(self.value_head.shape)  # (?, 9, 10, 1)\n",
    "                    value_head = tf.reshape(value_head, [-1, 9 * 10 * 1])\n",
    "                    value_head = tf.contrib.layers.fully_connected(value_head, 256, activation_fn=tf.nn.relu)\n",
    "                    value_head = tf.contrib.layers.fully_connected(value_head, 1, activation_fn=tf.nn.tanh)\n",
    "                    value_head_collection.append(value_head)\n",
    "                net_unsoftmax = policy_head\n",
    "\n",
    "                with tf.variable_scope(\"Loss\"):\n",
    "                    policy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n",
    "                        labels=nextmove[ind * (BATCH_SIZE // len(GPU_CORE)):(ind + 1) * (BATCH_SIZE // len(GPU_CORE))],\n",
    "                        logits=net_unsoftmax))\n",
    "                    #loss_summary = tf.summary.scalar(\"move_loss\",policy_loss)\n",
    "                    value_loss = tf.losses.mean_squared_error(\n",
    "                        labels=score[ind * (BATCH_SIZE // len(GPU_CORE)):(ind + 1) * (BATCH_SIZE // len(GPU_CORE))],\n",
    "                        predictions=value_head) \n",
    "                    value_loss = tf.reduce_mean(value_loss)\n",
    "                    regularizer = tf.contrib.layers.l2_regularizer(scale=1e-6)\n",
    "                    regular_variables = tf.trainable_variables()\n",
    "                    l2_loss = tf.contrib.layers.apply_regularization(regularizer, regular_variables)\n",
    "                    multitarget_loss = value_loss + policy_loss + l2_loss\n",
    "                    \n",
    "                    multitarget_loss_collection.append(multitarget_loss)\n",
    "                    value_loss_collection.append(value_loss)\n",
    "                    policy_loss_collection.append(policy_loss)\n",
    "                net_softmax = tf.nn.softmax(net_unsoftmax)\n",
    "                net_softmax_collection.append(net_softmax)\n",
    "                \n",
    "                # Accuracy must compare labels from the SAME per-tower slice; using the\n",
    "                # full nextmove batch mis-aligns labels with this tower's logits whenever\n",
    "                # len(GPU_CORE) > 1. Identical behavior for the single-GPU case shown above.\n",
    "                correct_prediction = tf.equal(tf.argmax(nextmove[ind * (BATCH_SIZE // len(GPU_CORE)):(ind + 1) * (BATCH_SIZE // len(GPU_CORE))],1), tf.argmax(net_softmax,1))\n",
    "\n",
    "                with tf.variable_scope(\"Accuracy\"):\n",
    "                    accuracy_select = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "                    accuracy_select_collection.append(accuracy_select)\n",
    "                tf.get_variable_scope().reuse_variables()\n",
    "                trainable_params = tf.trainable_variables()\n",
    "                tp_policy = [i for i in trainable_params if \n",
    "                                    ('value_head' not in i.name)]\n",
    "                tp_value = [i for i in trainable_params if \n",
    "                                    ('policy_head' not in i.name)]\n",
    "\n",
    "                add_grad_to_list(optimizer_policy,tp_policy,policy_loss,tower_gradients_policy)\n",
    "                add_grad_to_list(optimizer_value,tp_value,value_loss,tower_gradients_value)\n",
    "                add_grad_to_list(optimizer_multitarg,trainable_params,multitarget_loss,tower_gradients_multitarg)\n",
    "               \n",
    "    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n",
    "    with tf.control_dependencies(update_ops):\n",
    "        #gradients_policy = average_gradients(tower_gradients_policy)\n",
    "        train_op_policy = get_op_mul(tower_gradients_policy,optimizer_policy,global_step)\n",
    "        train_op_value = get_op_mul(tower_gradients_value,optimizer_value,global_step)\n",
    "        train_op_multitarg = get_op_mul(tower_gradients_multitarg,optimizer_multitarg,global_step)\n",
    "        #train_op = optimizer.minimize(policy_loss,global_step=global_step)\n",
    "    net_softmax = merge(net_softmax_collection)\n",
    "    value_head = merge(value_head_collection)\n",
    "    multitarget_loss = reduce_mean(multitarget_loss_collection)\n",
    "    value_loss = reduce_mean(value_loss_collection)\n",
    "    policy_loss = reduce_mean(policy_loss_collection)\n",
    "    accuracy_select = reduce_mean(accuracy_select_collection)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with graph.as_default():\n",
    "    config = tf.ConfigProto()\n",
    "    config.gpu_options.allow_growth = True\n",
    "    config.allow_soft_placement = True\n",
    "    sess = tf.Session(config=config)\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    \n",
    "    tf.train.global_step(sess, global_step)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mkdir: 无法创建目录\"models\": 文件已存在\r\n"
     ]
    }
   ],
   "source": [
    "# -p: no error if the directory already exists (keeps Restart & Run All clean)\n",
    "!mkdir -p models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import os\n",
    "if not os.path.exists(\"models/{}\".format(model_name)):\n",
    "    os.mkdir(\"models/{}\".format(model_name))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Batches per train/valid epoch, normalized so one epoch always covers the\n",
    "# same number of samples regardless of BATCH_SIZE (baseline batch size: 128).\n",
    "# Integer floor division replaces the float-divide-then-int() round trip.\n",
    "N_BATCH = 10000 * 128 // BATCH_SIZE\n",
    "N_BATCH_TEST = 300 * 128 // BATCH_SIZE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(2500, 75)"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "N_BATCH,N_BATCH_TEST"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0, 512)"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ind * (BATCH_SIZE // len(GPU_CORE)),(ind + 1) * (BATCH_SIZE // len(GPU_CORE))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "with graph.as_default():\n",
    "    train_epoch = 30\n",
    "    train_batch = 0\n",
    "    saver = tf.train.Saver(var_list=tf.global_variables())\n",
    "    saver.restore(sess,\"models/{}/model_{}\".format(model_name,train_epoch - 1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Restoring parameters from models/5_29_resnet_joint-two_stage/model_1\n"
     ]
    }
   ],
   "source": [
    "with graph.as_default():\n",
    "    train_epoch = 58\n",
    "    train_batch = 0\n",
    "    saver = tf.train.Saver(var_list=tf.global_variables())\n",
    "    saver.restore(sess,\"models/{}/model_{}\".format(model_name,1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "with graph.as_default():\n",
    "    saver = tf.train.Saver(var_list=tf.global_variables())\n",
    "    # Fixed export path for the prepare-weight pipeline. The string contains no\n",
    "    # placeholders, so the previous .format(model_name,2) call was a no-op.\n",
    "    saver.save(sess,\"../data/prepare_weight/2018-06-20_09-00-14\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "train_epoch = 1\n",
    "train_batch = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "EPOCH 1 STEP 2499 LR 0.01 ACC 21.25 LOSS 4.37 value_loss 0.47 100.00 % [==================================================>] 1280000/1280000 \t used:912s eta:0 ssed:0s eta:2094 s\n",
      " epoch 1 100.00 % [==================================================>] 320000/320000 \t used:234s eta:0 sEPOCH 1 valid loss 4.34023904800415 value loss 0.58978670835495 acc 21.5515625\n",
      "\n",
      "EPOCH 2 STEP 2499 LR 0.01 ACC 24.26 LOSS 3.6 value_loss 0.5 100.00 % [==================================================>] 1280000/1280000 \t used:934s eta:0 ssss\n",
      " epoch 2 100.00 % [==================================================>] 320000/320000 \t used:225s eta:0 sEPOCH 2 valid loss 3.621549606323242 value loss 0.5406498908996582 acc 23.2821875\n",
      "\n",
      "EPOCH 3 STEP 2499 LR 0.01 ACC 25.67 LOSS 3.27 value_loss 0.48 100.00 % [==================================================>] 1280000/1280000 \t used:894s eta:0 ss\n",
      " epoch 3 100.00 % [==================================================>] 320000/320000 \t used:229s eta:0 sEPOCH 3 valid loss 3.267334461212158 value loss 0.5795044898986816 acc 25.208125\n",
      "\n",
      "EPOCH 4 STEP 2499 LR 0.01 ACC 26.71 LOSS 3.07 value_loss 0.46 100.00 % [==================================================>] 1280000/1280000 \t used:892s eta:0 ss\n",
      " epoch 4 100.00 % [==================================================>] 320000/320000 \t used:227s eta:0 sEPOCH 4 valid loss 3.08573055267334 value loss 0.5267994999885559 acc 26.654375\n",
      "\n",
      "EPOCH 5 STEP 2499 LR 0.01 ACC 28.15 LOSS 2.94 value_loss 0.49 100.00 % [==================================================>] 1280000/1280000 \t used:892s eta:0 ss\n",
      " epoch 5 100.00 % [==================================================>] 320000/320000 \t used:226s eta:0 sEPOCH 5 valid loss 2.942870616912842 value loss 0.5398314595222473 acc 27.7640625\n",
      "\n",
      "EPOCH 6 STEP 2499 LR 0.01 ACC 29.15 LOSS 2.87 value_loss 0.45 100.00 % [==================================================>] 1280000/1280000 \t used:890s eta:0 ss\n",
      " epoch 6 100.00 % [==================================================>] 320000/320000 \t used:227s eta:0 sEPOCH 6 valid loss 2.879871368408203 value loss 0.5321686863899231 acc 28.69375\n",
      "\n",
      "EPOCH 7 STEP 2499 LR 0.01 ACC 30.4 LOSS 2.78 value_loss 0.44 100.00 % [==================================================>] 1280000/1280000 \t used:893s eta:0 sss\n",
      " epoch 7 100.00 % [==================================================>] 320000/320000 \t used:228s eta:0 sEPOCH 7 valid loss 2.81607723236084 value loss 0.5438942313194275 acc 29.3884375\n",
      "\n",
      "EPOCH 8 STEP 2499 LR 0.01 ACC 30.57 LOSS 2.75 value_loss 0.46 100.00 % [==================================================>] 1280000/1280000 \t used:893s eta:0 ss\n",
      " epoch 8 100.00 % [==================================================>] 320000/320000 \t used:228s eta:0 sEPOCH 8 valid loss 2.762759208679199 value loss 0.5544540286064148 acc 29.9734375\n",
      "\n",
      "EPOCH 9 STEP 2499 LR 0.01 ACC 31.63 LOSS 2.67 value_loss 0.45 100.00 % [==================================================>] 1280000/1280000 \t used:894s eta:0 ss\n",
      " epoch 9 100.00 % [==================================================>] 320000/320000 \t used:227s eta:0 sEPOCH 9 valid loss 2.719318389892578 value loss 0.5206046104431152 acc 30.6115625\n",
      "\n",
      "EPOCH 10 STEP 2499 LR 0.01 ACC 32.08 LOSS 2.63 value_loss 0.45 100.00 % [==================================================>] 1280000/1280000 \t used:894s eta:0 ss\n",
      " epoch 10 100.00 % [==================================================>] 320000/320000 \t used:225s eta:0 sEPOCH 10 valid loss 2.685540199279785 value loss 0.530287504196167 acc 30.92875\n",
      "\n",
      "EPOCH 11 STEP 2499 LR 0.01 ACC 32.5 LOSS 2.59 value_loss 0.44 100.00 % [==================================================>] 1280000/1280000 \t used:891s eta:0 sss\n",
      " epoch 11 100.00 % [==================================================>] 320000/320000 \t used:226s eta:0 sEPOCH 11 valid loss 2.6427855491638184 value loss 0.5211501717567444 acc 31.7421875\n",
      "\n",
      "EPOCH 12 STEP 520 LR 0.01 ACC 31.69 LOSS 2.64 value_loss 0.45 20.84 % [==========>----------------------------------------] 266752/1280000 \t used:187s eta:711 s"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Exception in thread Thread-5:\n",
      "Traceback (most recent call last):\n",
      "  File \"/usr/local/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\n",
      "    self.run()\n",
      "  File \"/usr/local/lib/python3.6/threading.py\", line 864, in run\n",
      "    self._target(*self._args, **self._kwargs)\n",
      "  File \"/usr/local/lib/python3.6/site-packages/tflearn/data_flow.py\", line 187, in fill_feed_dict_queue\n",
      "    data = self.retrieve_data(batch_ids)\n",
      "  File \"/usr/local/lib/python3.6/site-packages/tflearn/data_flow.py\", line 222, in retrieve_data\n",
      "    utils.slice_array(self.feed_dict[key], batch_ids)\n",
      "  File \"/usr/local/lib/python3.6/site-packages/tflearn/utils.py\", line 187, in slice_array\n",
      "    return X[start]\n",
      "  File \"<ipython-input-10-3b34ce9ec91d>\", line 67, in __getitem__\n",
      "    x1,y1,val1 = self.batch_iter.__next__()\n",
      "  File \"<ipython-input-10-3b34ce9ec91d>\", line 25, in __iter\n",
      "    onefile = filelist.pop()\n",
      "IndexError: pop from empty list\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\r",
      "EPOCH 12 STEP 521 LR 0.01 ACC 31.61 LOSS 2.65 value_loss 0.45 20.88 % [==========>----------------------------------------] 267264/1280000 \t used:187s eta:711 s"
     ]
    }
   ],
   "source": [
    "restore = True\n",
    "N_EPOCH = 100\n",
    "DECAY_EPOCH = 40\n",
    "\n",
    "class ExpVal:\n",
    "    \"\"\"Exponentially smoothed (EMA) tracker for a scalar training metric.\"\"\"\n",
    "    def __init__(self,exp_a=0.97):\n",
    "        self.val = None    # running average; None until the first update()\n",
    "        self.exp_a = exp_a  # smoothing factor: weight given to the history\n",
    "    def update(self,newval):\n",
    "        # 'is None' (not '== None') per PEP 8; also safe if a value overloads __eq__.\n",
    "        if self.val is None:\n",
    "            self.val = newval\n",
    "        else:\n",
    "            self.val = self.exp_a * self.val + (1 - self.exp_a) * newval\n",
    "    def getval(self):\n",
    "        return round(self.val,2)\n",
    "    \n",
    "expacc_move = ExpVal()\n",
    "exploss = ExpVal()\n",
    "expsteploss = ExpVal()\n",
    "\n",
    "begining_learning_rate = 1e-2\n",
    "\n",
    "pred_image = None\n",
    "if not restore:  # PEP 8: never compare booleans with ==\n",
    "    train_epoch = 1\n",
    "    train_batch = 0\n",
    "for one_epoch in range(train_epoch,N_EPOCH):\n",
    "    train_epoch = one_epoch\n",
    "    pb = ProgressBar(worksum=N_BATCH * BATCH_SIZE,info=\" epoch {} batch {}\".format(train_epoch,train_batch))\n",
    "    pb.startjob()\n",
    "    \n",
    "    for one_batch in range(N_BATCH):\n",
    "        if restore and one_batch < train_batch:\n",
    "            pb.auto_display = False\n",
    "            pb.complete(BATCH_SIZE)\n",
    "            pb.auto_display = True\n",
    "            continue\n",
    "        else:\n",
    "            restore = False\n",
    "        train_batch = one_batch\n",
    "        \n",
    "        batch_x,batch_y,batch_v = trainflow.next()['data']\n",
    "        batch_v = np.expand_dims(np.nan_to_num(batch_v),1)\n",
    "        # learning rate decay strategy\n",
    "        batch_lr = begining_learning_rate * 2 ** -(one_epoch // DECAY_EPOCH)\n",
    "        with graph.as_default():\n",
    "            _,step_loss,step_acc_move,step_value = sess.run(\n",
    "                [train_op_policy,policy_loss,accuracy_select,global_step],feed_dict={\n",
    "                    X:batch_x,nextmove:batch_y,learning_rate:batch_lr,training:True,\n",
    "                })\n",
    "            _,step_value_loss,step_val_predict = sess.run(\n",
    "                [train_op_value,value_loss,value_head],feed_dict={\n",
    "                    X:batch_x,learning_rate:batch_lr,training:True,score:batch_v,\n",
    "                })\n",
    "            batch_v = - batch_v\n",
    "            batch_x = np.concatenate((batch_x[:,::-1,:,7:],batch_x[:,::-1,:,:7]),axis=-1)\n",
    "            _,step_value_loss,step_val_predict = sess.run(\n",
    "                [train_op_value,value_loss,value_head],feed_dict={\n",
    "                    X:batch_x,learning_rate:batch_lr,training:True,score:batch_v,\n",
    "                })\n",
    "            \n",
    "        \n",
    "        step_acc_move *= 100\n",
    "        \n",
    "        expacc_move.update(step_acc_move)\n",
    "        exploss.update(step_loss)\n",
    "        expsteploss.update(step_value_loss)\n",
    "\n",
    "       \n",
    "        pb.info = \"EPOCH {} STEP {} LR {} ACC {} LOSS {} value_loss {}\".format(\n",
    "            one_epoch,one_batch,batch_lr,expacc_move.getval(),exploss.getval(),expsteploss.getval())\n",
    "        \n",
    "        pb.complete(BATCH_SIZE)\n",
    "    print()\n",
    "    pb = ProgressBar(worksum=N_BATCH // 4 * BATCH_SIZE,info=\" epoch {}\".format(train_epoch))\n",
    "    pb.startjob()\n",
    "    losses = []\n",
    "    value_losses = []\n",
    "    accs = []\n",
    "    for one_batch in range(N_BATCH // 4):\n",
    "        batch_x,batch_y,batch_v = testflow.next()['data']\n",
    "        batch_v = np.expand_dims(np.nan_to_num(batch_v),1)\n",
    "        # learning rate decay strategy\n",
    "        batch_lr = begining_learning_rate * 10 ** -(one_epoch // DECAY_EPOCH)\n",
    "        with graph.as_default():\n",
    "            step_loss,step_value_loss,step_acc_move,step_value = sess.run(\n",
    "                [policy_loss,value_loss,accuracy_select,global_step],feed_dict={\n",
    "                    X:batch_x,nextmove:batch_y,learning_rate:batch_lr,training:False,score:batch_v,\n",
    "                })\n",
    "        \n",
    "        step_acc_move *= 100\n",
    "        losses.append(step_loss)\n",
    "        accs.append(step_acc_move)\n",
    "        value_losses.append(step_value_loss)\n",
    "        pb.complete(BATCH_SIZE)\n",
    "    print(\"EPOCH {} valid loss {} value loss {} acc {}\".format(train_epoch,np.average(losses)\n",
    "                                                               ,np.average(value_losses),np.average(accs)))\n",
    "    print()\n",
    "    with graph.as_default():\n",
    "        saver = tf.train.Saver(var_list=tf.global_variables())\n",
    "        saver.save(sess,\"models/{}/model_{}\".format(model_name,one_epoch))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "ind = 9"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "gi = convert_game_value(testset.filelist[ind],testset.feature_list,pgn2value)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "gb = convert_game_board(testset.filelist[ind],testset.feature_list,pgn2value)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "0 砗─碼─象─士─将─士─象─碼─砗\n",
      "  │  │  │  │＼│／│　│　│　│\n",
      "1 ├─┼─┼─┼─※─┼─┼─┼─┤\n",
      "  │　│　│　│／│＼│　│　│　│\n",
      "2 ├─砲─┼─┼─┼─┼─┼─砲─┤\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "3 卒─┼─卒─┼─卒─┼─卒─┼─卒\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "4 ├─┴─┴─┴─┴─┴─┴─┴─┤\n",
      "  │　                         　 │\n",
      "5 ├─┬─┬─┬─┬─┬─┬─┬─┤\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "6 兵─┼─兵─┼─兵─┼─兵─┼─兵\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "7 ├─炮─┼─┼─┼─┼─┼─炮─┤\n",
      "  │　│　│　│＼│／│　│　│　│\n",
      "8 ├─┼─┼─┼─※─┼─┼─┼─┤\n",
      "  │　│　│　│／│＼│　│　│　│\n",
      "9 车─马─相─仕─帅─仕─相─马─车\n",
      "  0   1   2   3   4   5   6   7   8\n",
      "\n",
      "[0.28047004, 0.22795878, 0.1119001, 0.10171503, 0.06740769, 0.063189253, 0.034739129, 0.033217646, 0.024611259, 0.023836587]\n",
      "1 0.0220189 1.0 (7, 7, 4, 7)\n",
      "\n",
      "0 砗─碼─象─士─将─士─象─碼─砗\n",
      "  │  │  │  │＼│／│　│　│　│\n",
      "1 ├─┼─┼─┼─※─┼─┼─┼─┤\n",
      "  │　│　│　│／│＼│　│　│　│\n",
      "2 ├─砲─┼─┼─┼─┼─┼─砲─┤\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "3 卒─┼─卒─┼─卒─┼─卒─┼─卒\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "4 ├─┴─┴─┴─┴─┴─┴─┴─┤\n",
      "  │　                         　 │\n",
      "5 ├─┬─┬─┬─┬─┬─┬─┬─┤\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "6 兵─┼─兵─┼─兵─┼─兵─┼─兵\n",
      "  │　│　│　│　│　│　│　│　│\n",
      "7 ├─炮─┼─┼─炮─┼─┼─┼─┤\n",
      "  │　│　│　│＼│／│　│　│　│\n",
      "8 ├─┼─┼─┼─※─┼─┼─┼─┤\n",
      "  │　│　│　│／│＼│　│　│　│\n",
      "9 车─马─相─仕─帅─仕─相─马─车\n",
      "  0   1   2   3   4   5   6   7   8\n",
      "\n",
      "[0.74792457, 0.1390121, 0.094127417, 0.011864004, 0.0030493992, 0.0019161324, 0.00053097151, 0.00026088505, 0.00015622388, 0.00015331581]\n",
      "2 0.145185 1.0 (7, 0, 6, 2)\n"
     ]
    }
   ],
   "source": [
    "red = False\n",
    "xx = 0\n",
    "while True:\n",
    "    xx += 1\n",
    "    red = not red\n",
    "    bx,ba,bb = gi.__next__()\n",
    "    board_status,move = gb.__next__()\n",
    "\n",
    "    #print(np.sum(bx,axis=0))\n",
    "    x1 = np.transpose(bx,[1,2,0])\n",
    "    x1 = np.expand_dims(x1,axis=0)\n",
    "\n",
    "    x2 = np.concatenate((x1[:,::-1,:,7:],x1[:,::-1,:,:7]),axis=-1)\n",
    "    board_status.print_board()\n",
    "\n",
    "    with graph.as_default():\n",
    "        pred,pp = sess.run(\n",
    "            [value_head,net_softmax],feed_dict={\n",
    "                X:x1,training:False,\n",
    "            })\n",
    "        pred1 = sess.run(\n",
    "            [value_head],feed_dict={\n",
    "                X:x2,training:False,\n",
    "            })\n",
    "    #pp = list(zip(pp,))\n",
    "    print(sorted(pp.reshape(-1))[::-1][:10])\n",
    "    pred = pred[0][0]#(pred[0][0] - pred1[0][0]) / 2\n",
    "    #pred = (pred[0][0] - pred1[0][0]) / 2\n",
    "    if red:\n",
    "        print(xx,pred,bb,move)\n",
    "    else:\n",
    "        print(xx, - pred,-bb,move)\n",
    "    if xx >= 2:\n",
    "        break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1, 1, 1, 1, 1, 1, 1, 1, 1],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [0, 1, 0, 0, 1, 0, 0, 0, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [1, 0, 1, 0, 1, 0, 1, 0, 1],\n",
       "       [0, 1, 0, 0, 0, 0, 0, 1, 0],\n",
       "       [0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
       "       [1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=uint64)"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.sum(bx,axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1957年全国象棋个人赛.cbf\n",
      "1958年全国象棋个人赛.cbf\n",
      "1959年全运会.cbf\n",
      "1960年全国个人赛.cbf\n",
      "1962年全国象棋个人赛.cbf\n",
      "1966年全国个人赛.cbf\n",
      "1966年全国象棋个人赛 {97D7C772-104C-4EB7-AB6A-AFCEAB43F143}.cbf\n",
      "1966年全国象棋个人赛.cbf\n",
      "1978年全国象棋团体赛.cbf\n",
      "1989年棋王预选赛 {1E3ECFB4-88C6-45AF-9A6D-29A9369CB395}.cbf\n",
      "ls: 写入错误: 断开的管道\n"
     ]
    }
   ],
   "source": [
    "! ls  'data/imsa-cbf/' | head"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "﻿<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r",
      "\r\n",
      "<ChineseChessRecord Version=\"1.0\">\r",
      "\r\n",
      " <Head>\r",
      "\r\n",
      "  <Name>1957年全国象棋个人赛</Name>\r",
      "\r\n",
      "  <URL />\r",
      "\r\n",
      "  <From>10497.pgn</From>\r",
      "\r\n",
      "  <ContestType />\r",
      "\r\n",
      "  <Contest />\r",
      "\r\n",
      "  <Round>第15轮</Round>\r",
      "\r\n",
      "  <Group />\r",
      "\r\n",
      "  <Table />\r",
      "\r\n",
      "  <Date>1957-11-16</Date>\r",
      "\r\n",
      "  <Site>上海</Site>\r",
      "\r\n",
      "  <TimeRule />\r",
      "\r\n",
      "  <Red>李义庭</Red>\r",
      "\r\n",
      "  <RedTeam>武汉</RedTeam>\r",
      "\r\n",
      "  <RedTime />\r",
      "\r\n",
      "  <RedRating />\r",
      "\r\n",
      "  <Black>王嘉良</Black>\r",
      "\r\n",
      "  <BlackTeam>哈尔滨</BlackTeam>\r",
      "\r\n",
      "  <BlackTime />\r",
      "\r\n",
      "  <BlackRating />\r",
      "\r\n",
      "  <Referee />\r",
      "\r\n",
      "  <Recorder />\r",
      "\r\n",
      "  <Commentator />\r",
      "\r\n",
      "  <CommentatorURL />\r",
      "\r\n",
      "  <Creator />\r",
      "\r\n",
      "  <CreatorURL />\r",
      "\r\n",
      "  <DateCreated />\r",
      "\r\n",
      "  <DateModified>2017-11-04 18:20:55</DateModified>\r",
      "\r\n",
      "  <ECCO>D21</ECCO>\r",
      "\r\n",
      "  <RecordType>1</RecordType>\r",
      "\r\n",
      "  <RecordKind />\r",
      "\r\n",
      "  <RecordResult>0</RecordResult>\r",
      "\r\n",
      "  <ResultType />\r",
      "\r\n",
      "  <FEN>rnbakabnr/9/1c5c1/p1p1p1p1p/9/9/P1P1P1P1P/1C5C1/9/RNBAKABNR w - - 0 1</FEN>\r",
      "\r\n",
      " </Head>\r",
      "\r\n",
      " <MoveList>\r",
      "\r\n",
      "  <Move value=\"00-00\" />\r",
      "\r\n",
      "  <Move value=\"77-47\" />\r",
      "\r\n",
      "  <Move value=\"72-42\" />\r",
      "\r\n",
      "  <Move value=\"79-67\" />\r",
      "\r\n",
      "  <Move value=\"80-81\" />\r",
      "\r\n",
      "  <Move value=\"89-79\" />\r",
      "\r\n",
      "  <Move value=\"70-62\" />\r",
      "\r\n",
      "  <Move value=\"59-48\" />\r",
      "\r\n",
      "  <Move value=\"81-51\" />\r",
      "\r\n",
      "  <Move value=\"79-73\" />\r",
      "\r\n",
      "  <Move value=\"10-22\" />\r",
      "\r\n",
      "  <Move value=\"26-25\" />\r",
      "\r\n",
      "  <Move value=\"51-54\" />\r",
      "\r\n",
      "  <Move value=\"17-27\" />\r",
      "\r\n",
      "  <Move value=\"23-24\" />\r",
      "\r\n",
      "  <Move value=\"73-63\" />\r",
      "\r\n",
      "  <Move value=\"22-14\" />\r",
      "\r\n",
      "  <Move value=\"63-65\" />\r",
      "\r\n",
      "  <Move value=\"30-41\" />\r",
      "\r\n",
      "  <Move value=\"47-57\" />\r",
      "\r\n",
      "  <Move value=\"62-74\" />\r",
      "\r\n",
      "  <Move value=\"29-47\" />\r",
      "\r\n",
      "  <Move value=\"24-25\" />\r",
      "\r\n",
      "  <Move value=\"65-25\" />\r",
      "\r\n",
      "  <Move value=\"42-62\" />\r",
      "\r\n",
      "  <Move value=\"67-79\" />\r",
      "\r\n",
      "  <Move value=\"20-42\" />\r",
      "\r\n",
      "  <Move value=\"27-17\" />\r",
      "\r\n",
      "  <Move value=\"12-17\" />\r",
      "\r\n",
      "  <Move value=\"57-17\" />\r",
      "\r\n",
      "  <Move value=\"00-20\" />\r",
      "\r\n",
      "  <Move value=\"25-20\" />\r",
      "\r\n",
      "  <Move value=\"42-20\" />\r",
      "\r\n",
      "  <Move value=\"19-07\" />\r",
      "\r\n",
      "  <Move value=\"74-66\" />\r",
      "\r\n",
      "  <Move value=\"79-87\" />\r",
      "\r\n",
      "  <Move value=\"66-58\" />\r",
      "\r\n",
      "  <Move value=\"17-18\" />\r",
      "\r\n",
      "  <Move value=\"54-64\" />\r",
      "\r\n",
      "  <Move value=\"18-58\" />\r",
      "\r\n",
      "  <Move value=\"64-69\" />\r",
      "\r\n",
      "  <Move value=\"58-59\" />\r",
      "\r\n",
      "  <Move value=\"69-67\" />\r",
      "\r\n",
      "  <Move value=\"87-79\" />\r",
      "\r\n",
      "  <Move value=\"67-47\" />\r",
      "\r\n",
      "  <Move value=\"59-57\" />\r",
      "\r\n",
      "  <Move value=\"62-22\" />\r",
      "\r\n",
      "  <Move value=\"49-59\" />\r",
      "\r\n",
      "  <Move value=\"47-46\" />\r",
      "\r\n",
      "  <Move value=\"09-19\" />\r",
      "\r\n",
      "  <Move value=\"46-86\" />\r",
      "\r\n",
      "  <Move value=\"59-49\" />\r",
      "\r\n",
      "  <Move value=\"14-35\" />\r",
      "\r\n",
      "  <Move value=\"19-29\" />\r",
      "\r\n",
      "  <Move value=\"22-27\" />\r",
      "\r\n",
      "  <Move value=\"48-59\" />\r",
      "\r\n",
      "  <Move value=\"20-42\" />\r",
      "\r\n",
      "  <Move value=\"57-37\" />\r",
      "\r\n",
      "  <Move value=\"86-06\" />\r",
      "\r\n",
      "  <Move value=\"39-48\" />\r",
      "\r\n",
      "  <Move value=\"03-04\" />\r",
      "\r\n",
      "  <Move value=\"79-58\" />\r",
      "\r\n",
      "  <Move value=\"04-05\" />\r",
      "\r\n",
      "  <Move value=\"07-19\" />\r",
      "\r\n",
      "  <Move value=\"27-24\" />\r",
      "\r\n",
      "  <Move value=\"29-25\" />\r",
      "\r\n",
      "  <Move value=\"35-56\" />\r",
      "\r\n",
      "  <Move value=\"19-27\" />\r",
      "\r\n",
      "  <Move value=\"24-44\" />\r",
      "\r\n",
      "  <Move value=\"48-57\" />\r",
      "\r\n",
      "  <Move value=\"06-36\" />\r",
      "\r\n",
      "  <Move value=\"25-05\" />\r",
      "\r\n",
      "  <Move value=\"56-37\" />\r",
      "\r\n",
      "  <Move value=\"58-37\" />\r",
      "\r\n",
      "  <Move value=\"36-37\" />\r",
      "\r\n",
      "  <Move value=\"27-35\" />\r",
      "\r\n",
      "  <Move value=\"37-57\" />\r",
      "\r\n",
      "  <Move value=\"35-43\" />\r",
      "\r\n",
      "  <Move value=\"57-47\" />\r",
      "\r\n",
      "  <Move value=\"49-39\" />\r",
      "\r\n",
      "  <Move value=\"47-37\" />\r",
      "\r\n",
      "  <Move value=\"39-49\" />\r",
      "\r\n",
      "  <Move value=\"37-33\" />\r",
      "\r\n",
      "  <Move value=\"43-62\" />\r",
      "\r\n",
      "  <Move value=\"33-63\" />\r",
      "\r\n",
      "  <Move value=\"05-00\" />\r",
      "\r\n",
      "  <Move value=\"41-30\" />\r",
      "\r\n",
      "  <Move value=\"00-04\" />\r",
      "\r\n",
      "  <Move value=\"44-64\" end=\"1\" />\r",
      "\r\n",
      " </MoveList>\r",
      "\r\n",
      "</ChineseChessRecord>\r",
      "\r\n"
     ]
    }
   ],
   "source": [
    "! cat 'data/imsa-cbf/1957年全国象棋个人赛.cbf'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "77-47 h2e2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'a': '0',\n",
       " 'b': '1',\n",
       " 'c': '2',\n",
       " 'd': '3',\n",
       " 'e': '4',\n",
       " 'f': '5',\n",
       " 'g': '6',\n",
       " 'h': '7',\n",
       " 'i': '8'}"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dict(zip('abcdefghi','012345678'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'0': '9',\n",
       " '1': '8',\n",
       " '2': '7',\n",
       " '3': '6',\n",
       " '4': '5',\n",
       " '5': '4',\n",
       " '6': '3',\n",
       " '7': '2',\n",
       " '8': '1',\n",
       " '9': '0'}"
      ]
     },
     "execution_count": 67,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dict(zip('9876543210','0123456789'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "tf1.3_python",
   "language": "python",
   "name": "tf1.3_kernel"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
