{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "#######################################################################\n",
    "# Copyright (C)                                                       #\n",
    "# 2016 Shangtong Zhang(zhangshangtong.cpp@gmail.com)                  #\n",
    "# 2016 Jan Hakenberg(jan.hakenberg@gmail.com)                         #\n",
    "# 2016 Tian Jun(tianjun.cpp@gmail.com)                                #\n",
    "# 2016 Kenta Shimada(hyperkentakun@gmail.com)                         #\n",
    "# Permission given to modify the code as long as you keep this        #\n",
    "# declaration at the top                                              #\n",
    "#######################################################################\n",
    "\n",
    "from __future__ import print_function\n",
    "import numpy as np\n",
    "import pickle\n",
    "\n",
    "BOARD_ROWS = 3\n",
    "BOARD_COLS = 3\n",
    "BOARD_SIZE = BOARD_ROWS * BOARD_COLS\n",
    "\n",
    "class State:\n",
    "    \"\"\"A tic-tac-toe board with a cached hash and cached end-of-game status.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # the board is represented by an n * n array,\n",
    "        # 1 represents chessman of the player who moves first,\n",
    "        # -1 represents chessman of another player\n",
    "        # 0 represents empty position\n",
    "        self.data = np.zeros((BOARD_ROWS, BOARD_COLS))\n",
    "        self.winner = None   # set by isEnd(): 1, -1, or 0 for a tie\n",
    "        self.hashVal = None  # lazily computed by getHash()\n",
    "        self.end = None      # lazily computed by isEnd()\n",
    "\n",
    "    # calculate the hash value for one state, it's unique:\n",
    "    # a base-3 encoding of the cells (0 empty, 1 first player, 2 second)\n",
    "    def getHash(self):\n",
    "        if self.hashVal is None:\n",
    "            self.hashVal = 0\n",
    "            for i in self.data.reshape(BOARD_ROWS * BOARD_COLS):\n",
    "                if i == -1:\n",
    "                    i = 2\n",
    "                self.hashVal = self.hashVal * 3 + i\n",
    "        return int(self.hashVal)\n",
    "\n",
    "    # determine whether a player has won the game, or it's a tie;\n",
    "    # caches the result in self.end and records self.winner\n",
    "    def isEnd(self):\n",
    "        if self.end is not None:\n",
    "            return self.end\n",
    "        results = []\n",
    "        # check rows\n",
    "        for i in range(0, BOARD_ROWS):\n",
    "            results.append(np.sum(self.data[i, :]))\n",
    "        # check columns\n",
    "        for i in range(0, BOARD_COLS):\n",
    "            results.append(np.sum(self.data[:, i]))\n",
    "\n",
    "        # check diagonals\n",
    "        results.append(0)\n",
    "        for i in range(0, BOARD_ROWS):\n",
    "            results[-1] += self.data[i, i]\n",
    "        results.append(0)\n",
    "        for i in range(0, BOARD_ROWS):\n",
    "            results[-1] += self.data[i, BOARD_ROWS - 1 - i]\n",
    "\n",
    "        # a line summing to +-3 means three in a row for that player\n",
    "        for result in results:\n",
    "            if result == 3:\n",
    "                self.winner = 1\n",
    "                self.end = True\n",
    "                return self.end\n",
    "            if result == -3:\n",
    "                self.winner = -1\n",
    "                self.end = True\n",
    "                return self.end\n",
    "\n",
    "        # whether it's a tie: every cell occupied and no winner\n",
    "        # ('total' instead of 'sum' to avoid shadowing the builtin)\n",
    "        total = np.sum(np.abs(self.data))\n",
    "        if total == BOARD_ROWS * BOARD_COLS:\n",
    "            self.winner = 0\n",
    "            self.end = True\n",
    "            return self.end\n",
    "\n",
    "        # game is still going on\n",
    "        self.end = False\n",
    "        return self.end\n",
    "\n",
    "    # @symbol 1 or -1\n",
    "    # put chessman symbol in position (i, j); returns a NEW State,\n",
    "    # leaving this one untouched\n",
    "    def nextState(self, i, j, symbol):\n",
    "        newState = State()\n",
    "        newState.data = np.copy(self.data)\n",
    "        newState.data[i, j] = symbol\n",
    "        return newState\n",
    "\n",
    "    # print the board: '*' first player, 'x' second player, '0' empty\n",
    "    def show(self):\n",
    "        for i in range(0, BOARD_ROWS):\n",
    "            print('-------------')\n",
    "            out = '| '\n",
    "            for j in range(0, BOARD_COLS):\n",
    "                if self.data[i, j] == 1:\n",
    "                    token = '*'\n",
    "                elif self.data[i, j] == -1:\n",
    "                    token = 'x'\n",
    "                else:\n",
    "                    token = '0'\n",
    "                out += token + ' | '\n",
    "            print(out)\n",
    "        print('-------------')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Judger:\n",
    "    # Coordinates one game between two players and hands out rewards.\n",
    "    # @player1: player who will move first, its chessman will be 1\n",
    "    # @player2: another player with chessman -1\n",
    "    # @feedback: if True, both players will receive rewards when game is end\n",
    "    def __init__(self, player1, player2, feedback=True):\n",
    "        self.p1 = player1\n",
    "        self.p2 = player2\n",
    "        self.feedback = feedback\n",
    "        self.currentPlayer = None\n",
    "        self.p1Symbol = 1\n",
    "        self.p2Symbol = -1\n",
    "        self.p1.setSymbol(self.p1Symbol)\n",
    "        self.p2.setSymbol(self.p2Symbol)\n",
    "        self.currentState = State()\n",
    "        self.allStates = allStates  # global table of every reachable state\n",
    "\n",
    "    # give reward to two players once the game has ended\n",
    "    def giveReward(self):\n",
    "        winner = self.currentState.winner\n",
    "        if winner == self.p1Symbol:\n",
    "            self.p1.feedReward(1)\n",
    "            self.p2.feedReward(0)\n",
    "        elif winner == self.p2Symbol:\n",
    "            self.p1.feedReward(0)\n",
    "            self.p2.feedReward(1)\n",
    "        else:\n",
    "            # tie: small, deliberately asymmetric rewards\n",
    "            self.p1.feedReward(0.1)\n",
    "            self.p2.feedReward(0.5)\n",
    "\n",
    "    # show both players the current board position\n",
    "    def feedCurrentState(self):\n",
    "        self.p1.feedState(self.currentState)\n",
    "        self.p2.feedState(self.currentState)\n",
    "\n",
    "    # prepare both players and the board for a fresh game\n",
    "    def reset(self):\n",
    "        self.p1.reset()\n",
    "        self.p2.reset()\n",
    "        self.currentState = State()\n",
    "        self.currentPlayer = None\n",
    "\n",
    "    # @show: if True, print each board during the game\n",
    "    # returns the winner's symbol (1, -1, or 0 for a tie)\n",
    "    def play(self, show=False):\n",
    "        self.reset()\n",
    "        self.feedCurrentState()\n",
    "        while True:\n",
    "            # alternate turns, p1 moves first\n",
    "            self.currentPlayer = self.p2 if self.currentPlayer == self.p1 else self.p1\n",
    "            if show:\n",
    "                self.currentState.show()\n",
    "            i, j, symbol = self.currentPlayer.takeAction()\n",
    "            self.currentState = self.currentState.nextState(i, j, symbol)\n",
    "            hashValue = self.currentState.getHash()\n",
    "            # swap in the canonical cached state and its precomputed end flag\n",
    "            self.currentState, isEnd = self.allStates[hashValue]\n",
    "            self.feedCurrentState()\n",
    "            if isEnd:\n",
    "                if self.feedback:\n",
    "                    self.giveReward()\n",
    "                return self.currentState.winner\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# AI player: learns state values with temporal-difference updates\n",
    "class Player:\n",
    "    # @stepSize: step size to update estimations\n",
    "    # @exploreRate: possibility to explore (random move probability)\n",
    "    def __init__(self, stepSize=0.1, exploreRate=0.1):\n",
    "        self.allStates = allStates\n",
    "        self.estimations = dict()  # state hash -> estimated value\n",
    "        self.stepSize = stepSize\n",
    "        self.exploreRate = exploreRate\n",
    "        self.states = []  # states observed during the current game\n",
    "\n",
    "    def reset(self):\n",
    "        self.states = []\n",
    "\n",
    "    # assign this player's symbol and initialize value estimates:\n",
    "    # 1 for end states it wins, 0 for end states it loses or ties,\n",
    "    # 0.5 for every non-terminal state\n",
    "    def setSymbol(self, symbol):\n",
    "        self.symbol = symbol\n",
    "        # 'hashVal' instead of 'hash' avoids shadowing the builtin\n",
    "        for hashVal in self.allStates.keys():\n",
    "            (state, isEnd) = self.allStates[hashVal]\n",
    "            if isEnd:\n",
    "                if state.winner == self.symbol:\n",
    "                    self.estimations[hashVal] = 1.0\n",
    "                else:\n",
    "                    self.estimations[hashVal] = 0\n",
    "            else:\n",
    "                self.estimations[hashVal] = 0.5\n",
    "\n",
    "    # accept a state\n",
    "    def feedState(self, state):\n",
    "        self.states.append(state)\n",
    "\n",
    "    # update estimation according to reward, backing the target up\n",
    "    # through the visited states (TD update)\n",
    "    def feedReward(self, reward):\n",
    "        if len(self.states) == 0:\n",
    "            return\n",
    "        # keep the hashes in a local list instead of retyping self.states\n",
    "        visited = [state.getHash() for state in self.states]\n",
    "        target = reward\n",
    "        for latestState in reversed(visited):\n",
    "            value = self.estimations[latestState] + self.stepSize * (target - self.estimations[latestState])\n",
    "            self.estimations[latestState] = value\n",
    "            target = value\n",
    "        self.states = []\n",
    "\n",
    "    # determine next action: explore with probability exploreRate,\n",
    "    # otherwise move greedily to the highest-valued successor state\n",
    "    def takeAction(self):\n",
    "        state = self.states[-1]\n",
    "        nextStates = []\n",
    "        nextPositions = []\n",
    "        for i in range(BOARD_ROWS):\n",
    "            for j in range(BOARD_COLS):\n",
    "                if state.data[i, j] == 0:\n",
    "                    nextPositions.append([i, j])\n",
    "                    nextStates.append(state.nextState(i, j, self.symbol).getHash())\n",
    "        if np.random.binomial(1, self.exploreRate):\n",
    "            np.random.shuffle(nextPositions)\n",
    "            # Not sure if truncating is the best way to deal with exploratory step\n",
    "            # Maybe it's better to only skip this step rather than forget all the history\n",
    "            self.states = []\n",
    "            action = nextPositions[0]\n",
    "            action.append(self.symbol)\n",
    "            return action\n",
    "\n",
    "        values = []\n",
    "        for hashVal, pos in zip(nextStates, nextPositions):\n",
    "            values.append((self.estimations[hashVal], pos))\n",
    "        # shuffle first so ties are broken randomly by the stable sort\n",
    "        np.random.shuffle(values)\n",
    "        values.sort(key=lambda x: x[0], reverse=True)\n",
    "        action = values[0][1]\n",
    "        action.append(self.symbol)\n",
    "        return action\n",
    "\n",
    "    # persist the learned value table; 'with' guarantees the file is\n",
    "    # closed even if pickling raises\n",
    "    def savePolicy(self):\n",
    "        with open('optimal_policy_' + str(self.symbol), 'wb') as fw:\n",
    "            pickle.dump(self.estimations, fw)\n",
    "\n",
    "    # load a previously saved value table\n",
    "    def loadPolicy(self):\n",
    "        with open('optimal_policy_' + str(self.symbol), 'rb') as fr:\n",
    "            self.estimations = pickle.load(fr)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# human interface\n",
    "# input a number to put a chessman\n",
    "# | 1 | 2 | 3 |\n",
    "# | 4 | 5 | 6 |\n",
    "# | 7 | 8 | 9 |\n",
    "class HumanPlayer:\n",
    "    # parameters are accepted (and ignored) so HumanPlayer is\n",
    "    # interchangeable with Player inside Judger\n",
    "    def __init__(self, stepSize=0.1, exploreRate=0.1):\n",
    "        self.symbol = None\n",
    "        self.currentState = None\n",
    "\n",
    "    def reset(self):\n",
    "        return\n",
    "\n",
    "    def setSymbol(self, symbol):\n",
    "        self.symbol = symbol\n",
    "\n",
    "    def feedState(self, state):\n",
    "        self.currentState = state\n",
    "\n",
    "    def feedReward(self, reward):\n",
    "        return\n",
    "\n",
    "    # prompt for a cell number (1-9, row major) until a free cell is\n",
    "    # chosen; a loop instead of recursion avoids unbounded call depth\n",
    "    # when the user repeatedly picks an occupied cell\n",
    "    def takeAction(self):\n",
    "        while True:\n",
    "            data = int(input(\"Input your position:\")) - 1\n",
    "            i = data // BOARD_COLS\n",
    "            j = data % BOARD_COLS\n",
    "            if self.currentState.data[i, j] == 0:\n",
    "                return (i, j, self.symbol)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def train(epochs=20000):\n",
    "    # self-play training: both agents update value estimates each game\n",
    "    # and the learned policies are saved to disk at the end\n",
    "    player1 = Player()\n",
    "    player2 = Player()\n",
    "    judger = Judger(player1, player2)\n",
    "    player1Win = 0.0\n",
    "    player2Win = 0.0\n",
    "    for i in range(0, epochs):\n",
    "        # throttle progress output so 20000 epochs don't flood the notebook\n",
    "        if i % 500 == 0:\n",
    "            print(\"Epoch\", i)\n",
    "        winner = judger.play()\n",
    "        if winner == 1:\n",
    "            player1Win += 1\n",
    "        if winner == -1:\n",
    "            player2Win += 1\n",
    "        judger.reset()\n",
    "    print(player1Win / epochs)\n",
    "    print(player2Win / epochs)\n",
    "    player1.savePolicy()\n",
    "    player2.savePolicy()\n",
    "\n",
    "def compete(turns=500):\n",
    "    # evaluate the two saved policies greedily (no exploration,\n",
    "    # no learning feedback)\n",
    "    player1 = Player(exploreRate=0)\n",
    "    player2 = Player(exploreRate=0)\n",
    "    judger = Judger(player1, player2, False)\n",
    "    player1.loadPolicy()\n",
    "    player2.loadPolicy()\n",
    "    player1Win = 0.0\n",
    "    player2Win = 0.0\n",
    "    for i in range(0, turns):\n",
    "        # these are evaluation turns, not training epochs\n",
    "        if i % 100 == 0:\n",
    "            print(\"Turn\", i)\n",
    "        winner = judger.play()\n",
    "        if winner == 1:\n",
    "            player1Win += 1\n",
    "        if winner == -1:\n",
    "            player2Win += 1\n",
    "        judger.reset()\n",
    "    print(player1Win / turns)\n",
    "    print(player2Win / turns)\n",
    "\n",
    "def play():\n",
    "    # interactive loop: trained AI (moves first) vs. human, repeated forever\n",
    "    while True:\n",
    "        player1 = Player(exploreRate=0)\n",
    "        player2 = HumanPlayer()\n",
    "        judger = Judger(player1, player2, False)\n",
    "        player1.loadPolicy()\n",
    "        winner = judger.play(True)\n",
    "        if winner == player2.symbol:\n",
    "            print(\"Win!\")\n",
    "        elif winner == player1.symbol:\n",
    "            print(\"Lose!\")\n",
    "        else:\n",
    "            print(\"Tie!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getAllStatesImpl(currentState, currentSymbol, allStates):\n",
    "    # Depth-first enumeration of every state reachable from currentState\n",
    "    # when it is currentSymbol's turn to move; results accumulate in\n",
    "    # allStates as hash -> (State, isEnd).\n",
    "    for i in range(0, BOARD_ROWS):\n",
    "        for j in range(0, BOARD_COLS):\n",
    "            if currentState.data[i][j] == 0:\n",
    "                newState = currentState.nextState(i, j, currentSymbol)\n",
    "                newHash = newState.getHash()\n",
    "                if newHash not in allStates.keys():\n",
    "                    isEnd = newState.isEnd()\n",
    "                    allStates[newHash] = (newState, isEnd)\n",
    "                    if not isEnd:\n",
    "                        # the other player (-currentSymbol) moves next\n",
    "                        getAllStatesImpl(newState, -currentSymbol, allStates)\n",
    "\n",
    "def getAllStates():\n",
    "    # Build the table of every board configuration reachable from the\n",
    "    # empty board with player 1 (symbol 1) moving first.\n",
    "    currentSymbol = 1\n",
    "    currentState = State()\n",
    "    allStates = dict()\n",
    "    allStates[currentState.getHash()] = (currentState, currentState.isEnd())\n",
    "    getAllStatesImpl(currentState, currentSymbol, allStates)\n",
    "    return allStates\n",
    "\n",
    "# all possible board configurations, keyed by State hash\n",
    "allStates = getAllStates()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# train the agents by self-play, then evaluate the saved greedy policies\n",
    "train()\n",
    "compete()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# play interactively against the trained AI (requires a saved policy file)\n",
    "play()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
