import numpy as np
from mcts.base.base import BaseState,BaseAction
from mcts.searcher.mcts import MCTS
from copy import deepcopy
import time
import logging

logging.basicConfig(level=logging.INFO)

# Action names, index-aligned with the columns of ``cost`` below:
# actionList=['积点','一枪','两枪','三枪','四枪','大炮',
#             '穿刺','核武器','超级核武器','刺客','趴下','反弹',
#             '吸收','强制吸收','牺牲','回血','自爆','交易','克隆']
# English:   ['accumulate','shot 1','shot 2','shot 3','shot 4','cannon',
#             'pierce','nuke','super nuke','assassin','duck','reflect',
#             'absorb','forced absorb','sacrifice','heal','self-destruct','trade','clone']

# Per-action cost table, one column per action type (0-18):
#   row 0 = point cost (negative values grant points, e.g. accumulate/sacrifice)
#   row 1 = life cost  (negative value on 'heal' restores a life)
cost=np.array([[-1,1,2,3,4,5,6,7,20,2,0,0,2,4,-3,4,2,0,1],
               [0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,1,1,0]])

class Action(BaseAction):
    """One move chosen by a player.

    Attributes:
        player: 0 or 1 — which player takes the action.
        actionType: column index into the ``cost`` table (0-18).
        manual: True when the action came from console input; enables the
            debug printout inside ``JiDian.take_action``.
    """

    def __init__(self, player, actionType, manual=False):
        self.player = player
        self.actionType = actionType
        self.manual = manual

    def __str__(self):
        return str(self.actionType)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # Equality deliberately ignores ``manual``: it is a UI flag, not
        # part of the move's identity.
        return (self.__class__ == other.__class__
                and self.actionType == other.actionType
                and self.player == other.player)

    def __hash__(self):
        # BUG FIX: the hash previously included ``manual``, so two actions
        # that compare equal could hash differently — violating the
        # __eq__/__hash__ contract and breaking dict/set lookups (MCTS
        # keys child nodes by action).
        return hash((self.actionType, self.player))


class JiDian(BaseState):
    """Game state for a two-player simultaneous-move hand game ("积点").

    Both players secretly pick one action per round.  Player 0's action is
    only *staged* (copied into the ``temp*`` arrays); player 1's action
    triggers the joint resolution of both staged moves, so
    ``take_action`` behaves differently depending on ``action.player``.

    Action indices (see the ``cost`` table): 0 accumulate, 1-5 shots of
    increasing strength, 6 pierce, 7 cannon, 8 nuke, 9 assassin, 10 duck,
    11 reflect, 12 absorb, 13 forced absorb, 14 sacrifice, 15 heal,
    16 self-destruct, 17 trade, 18 clone.
    # NOTE(review): mapping inferred from the commented-out actionList at
    # the top of the file — confirm against the actual game rules.
    """

    def __init__(self):
        self.currentPlayer: int = 0              # whose move MCTS is choosing
        self.round: int = 0                      # completed half-rounds
        self.points = np.array([0, 0])           # accumulated points per player
        self.lives = np.array([2, 2])            # remaining lives per player
        self.attacks = np.array([0, 0])          # pending incoming attack per player
        self.defends = np.array([0, 0])          # pending defence per player
        self.trade = np.array([[0, 0], [0, 0]])  # row 0: trade level, row 1: rounds left

        # Staging copies used while player 0's move waits for player 1's.
        self.tempPoints = np.array([0, 0])
        self.tempLives = np.array([2, 2])
        self.tempAttacks = np.array([0, 0])
        self.tempDefends = np.array([0, 0])
        self.tempTypes = np.array([0, 0])        # staged action type per player
        self.tempTrade = np.array([[0, 0], [0, 0]])

    def get_current_player(self) -> int:
        """Return the index (0 or 1) of the player to move."""
        return self.currentPlayer

    def get_possible_actions(self):
        """Return every ``Action`` the current player can afford.

        Action 0 (accumulate) is always legal.  Any other action is legal
        when the player can pay its point cost and its life cost is
        strictly below the player's lives (so an action can never kill its
        own user through cost alone).  When the opponent has no points,
        actions 10, 11 and 18 (duck / reflect / clone) are excluded.
        # NOTE(review): presumably those moves are pointless or forbidden
        # against a point-less opponent — confirm with the rules.
        """
        player = self.currentPlayer
        possibleActions = [Action(player=player, actionType=0)]
        for i in range(1, 19):
            if cost[0, i] <= self.points[player] and cost[1, i] < self.lives[player]:
                if self.points[1 - player] == 0 and (10 <= i <= 11 or i == 18):
                    continue
                possibleActions.append(Action(player=player, actionType=i))
        return possibleActions

    def toTemp(self):
        """Copy the committed state into the ``temp*`` staging arrays."""
        # NumPy item/row assignment copies the values, so the original's
        # deepcopy wrappers were unnecessary and have been removed.
        for player in [0, 1]:
            self.tempPoints[player] = self.points[player]
            self.tempLives[player] = self.lives[player]
            self.tempAttacks[player] = self.attacks[player]
            self.tempDefends[player] = self.defends[player]
            self.tempTrade[player] = self.trade[player]

    def fromTemp(self):
        """Commit the ``temp*`` staging arrays back into the real state."""
        for player in [0, 1]:
            self.points[player] = self.tempPoints[player]
            self.lives[player] = self.tempLives[player]
            self.attacks[player] = self.tempAttacks[player]
            self.defends[player] = self.tempDefends[player]
            self.trade[player] = self.tempTrade[player]

    def is_terminal(self):
        """The game ends as soon as either player has no lives left."""
        return bool((self.lives <= 0).any())

    def get_reward(self):
        """Terminal reward: life difference from the current player's view."""
        player = self.currentPlayer
        return self.lives[player] - self.lives[1 - player]

    def take_action(self, action: Action):
        """Return the successor state after ``action``.

        For player 0 the move is only staged and the returned state waits
        for player 1.  For player 1 both staged moves are resolved: skill
        interactions, damage application, and trade-effect countdown.

        The receiver is never mutated — MCTS requires immutable states.
        """
        newState = deepcopy(self)
        player = action.player
        actionType = action.actionType
        newState.toTemp()
        newState.tempTypes[player] = actionType
        newState.tempPoints[player] -= cost[0, actionType]
        newState.tempLives[player] -= cost[1, actionType]
        # Trade multiplies this player's output by 2 ** (trade level).
        coef = np.power(2, newState.tempTrade[0, player])
        if actionType < 6:
            # Shots 0-5: attack the opponent and defend against attacks of
            # equal or lower strength.
            newState.tempAttacks[1 - player] = actionType * coef
            newState.tempDefends[player] = actionType * coef
        elif actionType == 6:   # pierce: direct life damage
            newState.tempLives[1 - player] -= coef
        elif actionType == 7:   # cannon
            newState.tempAttacks[1 - player] = 5 * coef
        elif actionType == 8:   # nuke
            newState.tempAttacks[1 - player] = 20 * coef
        elif actionType == 9:   # assassin
            newState.tempAttacks[1 - player] = coef
        elif actionType == 14:  # sacrifice: strong defence this round
            newState.tempDefends[player] = 10 * coef
        elif actionType == 15:  # heal: strong defence this round
            newState.tempDefends[player] = 10 * coef
        elif actionType == 17:  # trade: raise level, effect lasts 2 rounds
            if newState.tempTrade[0, player] == 0:
                newState.tempTrade[1, player] = 2
            newState.tempTrade[0, player] += 1
            newState.tempDefends[player] = 20 * coef

        if player == 0:
            # Player 0 only stages the move; resolution happens on
            # player 1's action.
            newState.fromTemp()
            newState.currentPlayer = 1
            # BUG FIX: the original did ``self.round += 1`` here, mutating
            # the *input* state that the MCTS tree still references.
            # Increment the copy instead.
            newState.round += 1
            return newState

        # Both moves are staged; resolve their interaction from each
        # attacker's perspective in turn.  (Loop variable renamed from the
        # original's shadowing ``player`` — behavior is unchanged because
        # this path is only reached when action.player == 1.)
        for p in [0, 1]:
            _coef = np.power(2, newState.tempTrade[0, 1 - p])  # defender's trade multiplier
            myMove = newState.tempTypes[p]        # this attacker's action
            otherMove = newState.tempTypes[1 - p]  # the defender's action
            if myMove == 6:  # pierce
                if otherMove <= 2:  # shot 1-2 (or accumulate): reflected back
                    newState.tempAttacks[1 - p] = newState.tempAttacks[p]
                    newState.tempDefends[1 - p] = 0
                    newState.tempDefends[p] = newState.tempAttacks[p]
                elif 2 < otherMove < 9:  # stronger move: pierce is negated
                    newState.tempLives[1 - p] += 1
            elif otherMove == 10 and myMove != 7 and myMove != 8:  # duck
                if myMove == 0:
                    newState.tempDefends[p] = 10
                elif myMove == 9:  # assassin beats duck outright
                    newState.tempLives[1 - p] = 0
                else:
                    newState.tempDefends[1 - p] = 3 * _coef
            # NOTE(review): the next ``if`` starts a new chain on purpose in
            # the original — when myMove == 11 the absorb/self-destruct/clone
            # branches below are skipped; confirm that is intended.
            if myMove == 11:  # reflect
                if otherMove <= 2:  # weak shot: reflected back
                    newState.tempAttacks[1 - p] = newState.tempAttacks[p]
                    newState.tempDefends[1 - p] = 0
                    newState.tempDefends[p] = newState.tempAttacks[p]
                elif 2 < otherMove < 9:  # stronger move: reflect fails
                    pass
            elif otherMove == 12:  # absorb
                if newState.tempAttacks[1 - p] <= 9 * _coef:
                    newState.tempPoints[1 - p] += newState.tempAttacks[1 - p]
                    newState.tempDefends[1 - p] = newState.tempAttacks[1 - p]
            elif otherMove == 13:  # forced absorb
                if newState.tempAttacks[1 - p] <= 19 * _coef:
                    newState.tempPoints[1 - p] += newState.tempAttacks[1 - p]
                    newState.tempDefends[1 - p] = newState.tempAttacks[1 - p]
                if myMove == 0:  # also steals a point from an accumulating player
                    newState.tempPoints[p] -= 1
                    newState.tempPoints[1 - p] += 1
            elif otherMove == 16:  # self-destruct
                if newState.tempAttacks[1 - p] > 0:
                    newState.tempLives[p] = 0
                else:
                    newState.tempLives[1 - p] = 0
            elif otherMove == 18:  # clone: copy the incoming attack for 2 points
                if newState.tempPoints[1 - p] >= 2 and newState.tempAttacks[1 - p] > 0:
                    newState.tempDefends[1 - p] = newState.tempAttacks[1 - p]
                    newState.tempAttacks[p] = newState.tempAttacks[1 - p]
                    newState.tempPoints[1 - p] -= 2

        # Apply unblocked damage to both sides (player == 1 on this path).
        newState.tempLives[player] -= np.maximum(0, newState.tempAttacks[player] - newState.tempDefends[player])
        newState.tempLives[1 - player] -= np.maximum(0, newState.tempAttacks[1 - player] - newState.tempDefends[1 - player])

        if action.manual:
            # Debug dump for console play: types, attacks, defences.
            print(newState.tempTypes[player], newState.tempTypes[1 - player])
            print(newState.tempAttacks[player], newState.tempAttacks[1 - player])
            print(newState.tempDefends[player], newState.tempDefends[1 - player])

        # Attacks and defences are per-round; clear them after resolution.
        newState.tempAttacks[player], newState.tempAttacks[1 - player] = 0, 0
        newState.tempDefends[player], newState.tempDefends[1 - player] = 0, 0

        for p in [0, 1]:
            if newState.tempTrade[1, p] > 0:   # trade effect counts down each round
                newState.tempTrade[1, p] -= 1
            if newState.tempTrade[1, p] == 0:  # expired: reset the level
                newState.tempTrade[0, p] = 0

        newState.fromTemp()
        newState.currentPlayer = 1 - player  # back to player 0
        newState.round += 1

        return newState

def AIvsAI_interface(rounds, initState):
    """Run an AI-vs-AI match driven by a single MCTS searcher.

    Plays at most ``rounds`` rounds and returns 0 as soon as one player's
    lives reach zero.  Both searches run on the identical pre-move
    position, so the two moves are effectively simultaneous.
    """
    searcher = MCTS(time_limit=12000)
    state = initState
    chosen = [0, 0]
    for _ in range(rounds):
        points, lives = state.points, state.lives
        for player in [0, 1]:
            print('玩家%s有点数:%s,命数:%s' % (player, points[player], lives[player]))

        # Player 0 searches from the shared position.
        state.currentPlayer = 0
        bestAction, details = searcher.search(initial_state=state, needDetails=True)
        print(details)
        chosen[0] = bestAction.actionType
        print('玩家0思考完毕')

        # Player 1 searches from the very same position.
        state.currentPlayer = 1
        bestAction, details = searcher.search(initial_state=state, needDetails=True)
        print(details)
        chosen[1] = bestAction.actionType
        print('玩家1思考完毕')

        # Apply both moves: player 0's is staged, player 1's resolves them.
        state.currentPlayer = 0
        state = state.take_action(Action(0, chosen[0]))
        state = state.take_action(Action(1, chosen[1]))

        for player in [0, 1]:
            print('玩家 %s 使用了 %s！' % (player, chosen[player]))

        time.sleep(1)

        for player in [0, 1]:
            if state.lives[player] <= 0:
                print('玩家 %s 赢了' % (1 - player))
                return 0

def AI_interface(rounds, initState):
    """Console loop: MCTS AI as player 0 versus a human as player 1.

    Plays at most ``rounds`` rounds and returns 0 as soon as one player's
    lives reach zero.
    """
    searcher = MCTS(time_limit=12000)
    state = initState
    new_type = [0, 0]
    for i in range(rounds):
        points = state.points
        lives = state.lives
        bestAction, details = searcher.search(initial_state=state, needDetails=True)
        print(details)
        for player in [0, 1]:
            print('玩家%s有点数:%s,命数:%s' % (player, points[player], lives[player]))
        new_type[0] = bestAction.actionType
        print('玩家0思考完毕')
        state = state.take_action(Action(0, new_type[0]))

        new_type[1] = -1
        while new_type[1] < 0:
            new_type[1] = int(input('玩家1，请输入您要采取的动作'))
            # BUG FIX: reject out-of-range indices before touching ``cost``.
            # The original raised IndexError for inputs > 18 and silently
            # indexed from the array's end for negative inputs.
            if not 0 <= new_type[1] <= 18:
                new_type[1] = -1
            elif cost[0, new_type[1]] > points[1]:
                print('点数不足')
                new_type[1] = -1
            elif cost[1, new_type[1]] > lives[1]:
                print('命数不足')
                new_type[1] = -1
        state = state.take_action(Action(1, new_type[1], manual=False))

        for player in [0, 1]:
            print('玩家 %s 使用了 %s！' % (player, new_type[player]))

        time.sleep(1)

        for player in [0, 1]:
            if state.lives[player] <= 0:
                print('玩家 %s 赢了' % (1 - player))
                return 0


def _prompt_action(prompt, player_points, player_lives):
    """Keep prompting until the user enters an affordable action index (0-18)."""
    while True:
        choice = int(input(prompt))
        if not 0 <= choice <= 18:
            continue  # out of range: silently re-prompt, as before
        if cost[0, choice] > player_points:
            print('点数不足')
            continue
        # CONSISTENCY FIX: AI_interface also validates the life cost, but
        # the original interface() did not, letting a player pick a move
        # they cannot survive paying for.
        if cost[1, choice] > player_lives:
            print('命数不足')
            continue
        return choice


def interface(rounds, initState):
    """Console loop for two human players (used when debugging).

    Plays at most ``rounds`` rounds and returns 0 as soon as one player's
    lives reach zero.
    """
    state = initState
    new_type = [0, 0]
    for i in range(rounds):
        points = state.points
        lives = state.lives
        for player in [0, 1]:
            print('玩家%s有点数:%s,命数:%s' % (player, points[player], lives[player]))

        new_type[0] = _prompt_action('玩家0，请输入您要采取的动作', points[0], lives[0])
        state = state.take_action(Action(0, new_type[0], manual=True))

        new_type[1] = _prompt_action('玩家1，请输入您要采取的动作', points[1], lives[1])
        state = state.take_action(Action(1, new_type[1], manual=True))

        for player in [0, 1]:
            print('玩家 %s 使用了 %s！' % (player, new_type[player]))

        for player in [0, 1]:
            if state.lives[player] <= 0:
                print('玩家 %s 赢了' % (1 - player))
                return 0


# Shared starting position, created at import time so every interface
# function can reuse it.
initState=JiDian()

if __name__=='__main__':
    #AI_interface(50,initState)
    #interface(50,initState) # use when debugging
    AIvsAI_interface(50,initState) # not recommended