from Agent.Astar.Astar_Home import *
import math


def AStarRewardatrix(env, player):
    """Score each of the five actions ("U", "D", "L", "R", "S") for *player*.

    For every reward point on the board, run A* from the player's current
    position and credit the first step of the path with the point's reward,
    discounted by path length (``length ** -0.3`` — nearer rewards weigh more).

    Args:
        env: game environment exposing ``play1state``, ``play2state`` and
            ``reward_states`` (mapping of reward point -> reward value).
        player: 1 or 2 — which player's position to plan from.

    Returns:
        dict mapping action key to accumulated score.

    Raises:
        ValueError: if ``player`` is not 1 or 2 (previously this fell
            through and crashed later with an opaque NameError).
    """
    # Resolve this player's current position.
    if player == 1:
        nowstate = env.play1state
    elif player == 2:
        nowstate = env.play2state
    else:
        raise ValueError("player must be 1 or 2, got %r" % (player,))

    # Score accumulator, one entry per action.
    Socre = {"U": 0, "D": 0, "L": 0, "R": 0, "S": 0}

    # Credit the action that starts the A* path toward each reward point.
    # NOTE(review): assumes aStar.action() never returns length == 0 for a
    # non-None action (length ** -0.3 would raise ZeroDivisionError) — confirm.
    for reward_point in env.reward_states:
        aStar = AStar(startpoint=nowstate, endpoint=reward_point, env=env, player=player)
        action, length = aStar.action()
        if action is not None:
            Socre[action] += env.reward_states[reward_point] * math.pow(length, -0.3)

    return Socre


def AStarRewardOneMove(Socre):
    """Return the action with the highest strictly-positive score.

    Args:
        Socre: mapping of action key -> numeric score.

    Returns:
        The first action (in dict order) whose score beats every earlier
        one, or ``None`` when no score is greater than zero.
    """
    best_action = None
    best_score = 0
    for action, score in Socre.items():
        # Strict '>' keeps the earliest action on ties and rejects
        # non-positive scores entirely.
        if score > best_score:
            best_action = action
            best_score = score
    return best_action


def AStarRewardListThink(actionRewardList, illegalMove):
    """Pick a move from a history of per-step action-score dicts.

    With a short history (3 entries or fewer) the latest score dict decides
    alone. Otherwise the last three dicts are combined per move with a
    geometric discount (gamma = 0.5), weighting the most recent step
    highest, and the best combined move wins.

    Args:
        actionRewardList: non-empty list of score dicts, one per step,
            each keyed by "U"/"D"/"L"/"R"/"S".
        illegalMove: iterable of the move keys to consider for the
            multi-step combination. NOTE(review): despite the name, these
            are the candidate moves that get scored — confirm with callers.

    Returns:
        The chosen move key, or ``None`` if nothing scores positively.
    """
    GAMMA = 0.5
    THINK_NUM = 3

    # Short history: fall back to the single most recent score dict.
    if len(actionRewardList) <= THINK_NUM:
        return AStarRewardOneMove(actionRewardList[-1])

    # Discounted combination of the last THINK_NUM steps, per candidate move:
    # gamma^2 * r[-3] + gamma * r[-2] + r[-1].
    combined = {move: 0 for move in ("U", "D", "L", "R", "S")}
    for move in illegalMove:
        for step in range(-THINK_NUM, 0):
            combined[move] = GAMMA * combined[move] + actionRewardList[step][move]

    return AStarRewardOneMove(combined)
