#!/usr/bin/env python3
#encoding:UTF-8

#######################################################################
# Copyright (C)                                                       #
# 2016 - 2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com)           #
# 2016 Jan Hakenberg(jan.hakenberg@gmail.com)                         #
# 2016 Tian Jun(tianjun.cpp@gmail.com)                                #
# 2016 Kenta Shimada(hyperkentakun@gmail.com)                         #
# Permission given to modify the code as long as you keep this        #
# declaration at the top                                              #
#######################################################################

import pickle
import random
import numpy as np
from enviroment.env import BOARD_ROWS 
from enviroment.env import BOARD_COLS 

# All board states the game can represent.
# Maps {hash value: (state object, bool flag marking whether the state is terminal)}
from enviroment.states import all_states 


class Player:
    """Abstract base for board players.

    Declares the no-op hooks the game loop invokes each episode;
    concrete players (AI or human) override whichever pieces they need.
    """

    def __init__(self):
        # Current board state (set through add_state) and the
        # player's mark on the board (+1 or -1), filled in by subclasses.
        self.state = None
        self.symbol = None

    def reset_history(self):
        """Hook: clear per-episode bookkeeping. Default does nothing."""
        pass

    def add_state(self, state):
        """Hook: record a newly observed board state. Default does nothing."""
        pass

    def backup(self):
        """Hook: update value estimates after an episode. Default does nothing."""
        pass


# AI player
# AI player
class AI_Player(Player):
    """Tic-tac-toe player trained with temporal-difference (TD) learning.

    Maintains a table of state-value estimates (``estimations``) keyed by
    state hash, chooses moves epsilon-greedily, and updates the table with
    a TD(0) backup over the recorded episode trajectory.
    """

    # @symbol: the player's mark on the board, +1 or -1
    # @step_size: the step size (learning rate) used when updating estimations
    # @epsilon: the probability of taking a random exploratory move
    def __init__(self, symbol, step_size=0.1, epsilon=0.1):
        super().__init__()
        self.step_size = step_size
        self.epsilon = epsilon
        self.symbol = symbol

        self.reset_history()      # clear the visited-state trajectory and greedy flags
        self.reset_estimations()  # initialize the state-value table V

    def reset_history(self):
        """Clear the episode trajectory (visited states and their greedy flags)."""
        # Board state objects visited by this player during the episode.
        self.states = []
        # Parallel list of bools: True if the matching state was reached greedily.
        self.greedy = []

    def reset_estimations(self):
        """(Re)initialize the state-value table.

        ``estimations`` maps state hash -> V(state).  Terminal states are
        fixed: win -> 1.0, tie -> 0.5, loss -> 0.0.  Every non-terminal
        state starts at the neutral value 0.5.
        """
        self.estimations = dict()

        for hash_val, (state, is_end) in all_states.items():
            if is_end:
                if state.winner == self.symbol:
                    self.estimations[hash_val] = 1.0
                elif state.winner == 0:
                    # we need to distinguish between a tie (0.5) and a loss (0.0)
                    self.estimations[hash_val] = 0.5
                else:
                    # 0.0 (not int 0) keeps the table uniformly float-valued
                    self.estimations[hash_val] = 0.0
            else:
                self.estimations[hash_val] = 0.5

    def add_state(self, state):
        """Append a newly observed state; assumed greedy until act() says otherwise."""
        self.states.append(state)
        self.greedy.append(True)

    # update value estimation
    def backup(self):
        """TD(0) backup over the recorded trajectory.

        Per the Bellman optimality equation the sampled TD target for a
        non-terminal move is max_a q(s, a) = r + V(S') with r = 0, so:
            V(S) <- V(S) + step_size * (V(S') - V(S))
        applied backwards over the episode.  Exploratory (non-greedy)
        transitions contribute nothing: the boolean greedy flag multiplies
        the TD error to zero.
        """
        # Hash values of every state visited this episode, in order.
        self.states_hash = [state.hash() for state in self.states]

        for i in reversed(range(len(self.states_hash) - 1)):
            pre_hash = self.states_hash[i]
            next_hash = self.states_hash[i + 1]

            # reward is 0 for every intermediate move, so the target is V(S')
            td_error = self.greedy[i] * (self.estimations[next_hash] - self.estimations[pre_hash])
            self.estimations[pre_hash] += self.step_size * td_error

    # choose an action based on the state
    def act(self):
        """Pick a move epsilon-greedily; returns [row, col, symbol]."""
        # Current state is the most recently recorded one.
        state = self.states[-1]
        # Enumerate every empty cell and the hash of the state it leads to.
        next_states_hash = []
        next_positions = []
        for i in range(BOARD_ROWS):
            for j in range(BOARD_COLS):
                if state.data[i, j] == 0:
                    next_positions.append([i, j])
                    next_states_hash.append(state.next_state(i, j, self.symbol).hash())

        # Exploration: with probability epsilon pick a uniformly random empty cell.
        if np.random.rand() < self.epsilon:
            action = random.choice(next_positions)
            action.append(self.symbol)
            # Mark this move non-greedy so backup() skips its transition.
            self.greedy[-1] = False
            # return [row, col, symbol]
            return action

        # Exploitation: pick the successor state with the highest estimated value.
        # values holds (V(successor), position) pairs.
        values = []
        # renamed loop variable: 'hash' shadowed the builtin hash()
        for hash_val, pos in zip(next_states_hash, next_positions):
            values.append((self.estimations[hash_val], pos))

        # Shuffle first so ties between equal-valued states are broken randomly,
        # then sort descending by value and take the best.
        np.random.shuffle(values)
        values.sort(key=lambda x: x[0], reverse=True)
        action = values[0][1]
        action.append(self.symbol)
        return action

    def save_policy(self):
        """Pickle the state-value table to policy_first.bin / policy_second.bin."""
        with open('policy_%s.bin' % ('first' if self.symbol == 1 else 'second'), 'wb') as f:
            pickle.dump(self.estimations, f)

    def load_policy(self):
        """Load a previously saved state-value table.

        NOTE(review): pickle.load is unsafe on untrusted files — only load
        policy files this program produced itself.
        """
        with open('policy_%s.bin' % ('first' if self.symbol == 1 else 'second'), 'rb') as f:
            self.estimations = pickle.load(f)


# human interface
# input a number to put a chessman
# | q | w | e |
# | a | s | d |
# | z | x | c |
class HumanPlayer(Player):
    """Human player driven by keyboard input.

    Keys map to board cells, row-major:
        | q | w | e |
        | a | s | d |
        | z | x | c |
    """

    # @symbol: the player's mark on the board, +1 or -1
    def __init__(self, symbol, **kwargs):
        super().__init__()
        # Key layout, row-major: index // BOARD_COLS is the row,
        # index % BOARD_COLS is the column.
        self.keys = ['q', 'w', 'e', 'a', 's', 'd', 'z', 'x', 'c']
        self.symbol = symbol

    def add_state(self, state):
        """Remember the current board state so act() can display it."""
        self.state = state

    def act(self):
        """Show the board, read a key, and return the move (row, col, symbol).

        Re-prompts on an unrecognized key instead of raising ValueError.
        """
        self.state.print()
        while True:
            key = input("Input your position:")
            if key in self.keys:
                break
            print('Unknown key, choose one of: %s' % ' '.join(self.keys))
        data = self.keys.index(key)
        # fixed inconsistency: the original used int(BOARD_COLS) here but
        # plain BOARD_COLS in the modulo on the next line
        i = data // BOARD_COLS
        j = data % BOARD_COLS
        return (i, j, self.symbol)
