from env_init.environment import *
from Agent.AlphaBeta.AlphaBetaNode import *
from Agent.AlphaBeta.AlphaBetaSearch import *
from Agent.Astar.Astar_Home import *
from Agent.Q_Learning.Q_V1 import *
from Agent.ShortestMoveStra.OneMove import *
from Agent.Astar.Astar_reward import *
import time


def distance(state1, state2):
    """Return the Manhattan distance between two cells of a 12-column grid.

    States are flat cell indices: row = state // 12, column = state % 12.

    Fix: the original used ``int(state / 12)`` — float true-division, which
    silently loses precision for large integers. Integer ``divmod`` gives the
    same result for valid (non-negative) cell indices without going through
    a float.
    """
    row1, col1 = divmod(int(state1), 12)
    row2, col2 = divmod(int(state2), 12)
    return abs(row1 - row2) + abs(col1 - col2)


class Maintrategy:
    """Top-level move selector for one player.

    Combines several sub-strategies (greedy one-step pickup, Q-learning,
    reward-driven A*, plain A* back to base) and picks one per turn based on
    current capacity, remaining turns, and loop detection.

    The attributes below are *class-level* on purpose: they are shared game
    history across all instances, so both players' agents contribute to (and
    read from) the same move/position record.
    """

    # Total number of moves issued so far in the game (both players).
    Runnum = 0
    # Every action ever returned by getAction, in order (both players).
    actionList = []
    # Position history of player 1 / player 2 respectively.
    locationList1 = []
    locationList2 = []

    def __init__(self, env, player):
        self.env = env
        self.player = player
        # Countdown that keeps the reward-driven A* active for a few extra
        # turns after a movement loop has been detected.
        self.loopnum = 0

    def checkloop(self):
        """Detect an A-B-A-B oscillation in the current player's positions.

        Returns 3 when the last four recorded positions alternate between
        two cells (a movement loop), otherwise 0.
        """
        # Too early in the game for a loop to exist.
        if len(Maintrategy.actionList) < 6:
            return 0

        if self.player == 1:
            locationList = Maintrategy.locationList1
        else:
            locationList = Maintrategy.locationList2

        # Fix: actionList is shared by both players, so its length can exceed
        # this player's own history; guard the [-4] access explicitly to
        # avoid an IndexError.
        if len(locationList) < 4:
            return 0

        if locationList[-1] == locationList[-3] \
                and locationList[-2] == locationList[-4]:
            return 3
        return 0

    def getAction(self, reset=False):
        """Choose and return the next action ("U"/"D"/"R"/"L"/...).

        Side effects: appends the current position to this player's location
        history, appends the chosen action to the shared action list, and
        increments the shared move counter.

        :param reset: when True, restart the shared move counter (new game).
        :raises ValueError: if ``self.player`` is neither 1 nor 2.
        """
        t = time.time()

        if reset:
            Maintrategy.Runnum = 0

        if self.player == 1:
            jobs = self.env.play1capacity
            playturn = 2
            selfPlay = 1
            nowstate = self.env.play1state
            home = self.env.play1base
            locationList = Maintrategy.locationList1
        elif self.player == 2:
            jobs = self.env.play2capacity
            playturn = 1
            selfPlay = 2
            nowstate = self.env.play2state
            home = self.env.play2base
            locationList = Maintrategy.locationList2
        else:
            # Fix: previously an invalid player left jobs/nowstate/... unbound
            # and crashed later with a confusing UnboundLocalError.
            raise ValueError("player must be 1 or 2, got %r" % (self.player,))

        totalDistance = distance(self.env.play1state, self.env.play2state)
        homeDistance = distance(nowstate, home)
        locationList.append(nowstate)

        # Greedy first: while there is spare capacity, take an immediately
        # profitable one-step move if one exists ("P" means "pass" / none).
        if jobs < 10:
            greedymove = OneMove(self.env, self.player, nowstate)
            if greedymove != "P":
                Maintrategy.Runnum += 1
                print("greedy move cost :" + str(time.time() - t))
                Maintrategy.actionList.append(greedymove)
                return greedymove

        # Keep exploring when capacity is low AND enough turns remain to get
        # home afterwards — or when we are already standing on our base.
        # (Precedence note: the `and` clauses bind before the final `or`.)
        if (jobs < 7 and ((Config.RUNNUMMBERLIMIT - Maintrategy.Runnum) - homeDistance > 4) \
                and (homeDistance > 4 or jobs < 4) \
                or nowstate == home):

            # When a loop was detected, play the reward-driven A* for several
            # consecutive turns (loopnum is the remaining countdown).
            checkLoop = self.loopnum + self.checkloop()

            if checkLoop > 0 or totalDistance > 5:
                # Reward-function-driven A* breaks oscillation patterns.
                action = AStarReward(self.env, self.player)
                print("Astar Reward move cost :" + str(time.time() - t))
                self.loopnum = self.loopnum - 1

            else:
                # NOTE: `node` is only consumed by the commented-out
                # alternative strategies below; kept so switching back to
                # them (and any Node-construction side effects) is unchanged.
                node = Node(move=None, value=None, env=self.env, playturn=playturn, deepth=1, isTerminal=False,
                            selfPlay=selfPlay)

                # Main strategy: Q-learning. Game-tree / A* alternatives are
                # kept for reference:
                # action = alpha_beta_search(node)
                # aStar = AStar(startpoint=nowstate, endpoint=node, env=self.env, player=self.player)
                # action, _ = aStar.action()
                action = Q_learning_Agent(self.env, player=self.player)
                if action not in ["U", "D", "R", "L"]:
                    # Fix: replaced leftover keyboard-mash debug print with a
                    # meaningful diagnostic.
                    print("warning: Q_learning_Agent returned unexpected action: " + str(action))

                print("search move cost :" + str(time.time() - t))

        # Otherwise (capacity high, or too few turns left): A* back to base,
        # picking up rewards along the way.
        else:
            aStar = AStar(startpoint=nowstate, endpoint=home, env=self.env, player=self.player)
            action, _ = aStar.action()
            print("A-star move cost :" + str(time.time() - t))

        Maintrategy.Runnum += 1
        Maintrategy.actionList.append(action)
        return action
