# -*- coding: utf-8 -*-
from env_init.environment import *
from Agent.AlphaBeta.AlphaBetaNode import *
from Agent.AlphaBeta.AlphaBetaSearch import *
from Agent.Astar.Astar_Home import *
from Agent.Q_Learning.Q_V1 import *
from Agent.ShortestMoveStra.OneMove import *
from Agent.Astar.Astar_reward import *
from Agent.blockShortestMove.blockShortest import *
from Agent.Cruel.CruelAgent import *
from Agent.Astar.AstarGoHome import *
import time
from Agent.VRPPD.vrppd import *


def distance(state1, state2, width=12):
    """Return the Manhattan distance between two cells of a row-major grid.

    States encode board cells as ``row * width + col``; the board used by
    this module is 12 columns wide, which is the default.

    Args:
        state1: integer cell index of the first position.
        state2: integer cell index of the second position.
        width: number of columns in the grid (default 12, matching the board).

    Returns:
        ``|row1 - row2| + |col1 - col2|`` as an int.
    """
    # divmod splits the flat index into (row, col) in one call, replacing
    # the original int(state / 12) / state % 12 pair.
    row1, col1 = divmod(state1, width)
    row2, col2 = divmod(state2, width)
    return abs(row1 - row2) + abs(col1 - col2)


class Maintrategy:
    """Top-level per-turn move selector.

    Each call to :meth:`getAction` dispatches to one of several sub-agents
    (greedy one-step, block-value, cruel-greed, alpha-beta search, or A*
    back home) depending on current load, distances, local reward density
    and loop detection.

    NOTE(review): every attribute below is CLASS-level, so the history is
    shared across all instances and both players, and persists between
    games unless cleared externally (only ``Runnum`` is reset, via
    ``getAction(reset=True)``).  Confirm this sharing is intentional.
    """
    Runnum = 0              # moves issued so far; compared against Config.RUNNUMMBERLIMIT
    actionList = []         # every action returned by getAction (both players interleaved)
    locationList1 = []      # state history of player 1, appended each turn
    locationList2 = []      # state history of player 2, appended each turn
    actionRewardList = []   # AStarRewardatrix score matrix recorded each turn
    actionRewardList2 = []  # NOTE(review): never used in this file -- dead state?
    actionlocalsearch =[]   # NOTE(review): never used in this file -- dead state?

    def __init__(self, env, player):
        """Bind this strategy to an environment and a player id (1 or 2)."""

        self.env = env
        self.player = player
        # Loop-escape counter: bumped via checkloop() detection, decremented
        # each time the CruelGreed branch runs, forcing a few non-search moves.
        self.loopnum = 0

    # Check whether the current position is part of a repeating loop.
    def checkloop(self):
        """Return 3 when the player's last four positions form an A-B-A-B
        oscillation (a 2-cycle), else 0.

        Requires at least 6 recorded actions before it will report a loop.
        """

        if len(Maintrategy.actionList) < 6:
            return 0
        if self.player == 1:
            locationList = Maintrategy.locationList1
        else:
            locationList = Maintrategy.locationList2

        # A-B-A-B pattern over the last four recorded states.
        if (locationList[-1] == locationList[-3]) \
                and (locationList[-2] == locationList[-4]):
            return 3
        else:
            return 0

    def getAction(self, reset=False):
        """Choose and return the next move for ``self.player``.

        Returns a move token produced by one of the sub-agents (the
        alpha-beta branch expects one of "U"/"D"/"R"/"L"; OneMove may also
        yield "P", which is treated as "no greedy move available").

        Side effects: appends to the shared class-level history lists and
        increments ``Maintrategy.Runnum``.  ``reset=True`` zeroes the shared
        move counter before selecting.

        NOTE(review): if ``self.player`` is neither 1 nor 2, none of the
        locals below get bound and this method raises ``NameError``.
        """
        t = time.time()  # wall-clock start, used only for per-branch timing prints

        if reset == True:
            Maintrategy.Runnum = 0

        # Resolve "self vs opponent" views of the environment for player 1.
        if self.player == 1:
            jobs = self.env.play1capacity
            playturn = 2
            selfPlay = 1
            nowstate = self.env.play1state
            oppent = self.env.play2state
            home = self.env.play1base
            oppenthome = self.env.play2base
            locationList = Maintrategy.locationList1
            oppentjobs = self.env.play2capacity

        # Same resolution, mirrored, for player 2.
        if self.player == 2:
            jobs = self.env.play2capacity
            playturn = 1
            selfPlay = 2
            nowstate = self.env.play2state
            oppent = self.env.play1state
            home = self.env.play2base
            oppenthome = self.env.play1base
            locationList = Maintrategy.locationList2
            oppentjobs = self.env.play1capacity

        # Distance to the opponent: A* path length while they are away from
        # their base, plain Manhattan distance once they are home.
        if oppent != oppenthome:

            if nowstate != oppent:

                aStar_oppent = AStar(startpoint=nowstate, endpoint=oppent,
                                     env=self.env, player=self.player)

                _, oppentDistance = aStar_oppent.action()

            else:
                oppentDistance = 0

        else:
            oppentDistance = distance(self.env.play1state, self.env.play2state)

        # Distance from the current state back to our own base (A* path length).
        if nowstate != home:
            aStar_home = AStar(startpoint=nowstate, endpoint=home, env=self.env, player=self.player)
            _, homeDistance = aStar_home.action()
        else:
            # homeDistance = distance(nowstate,home)
            homeDistance = 0

        # Record this turn's position and reward matrix in the shared history.
        locationList.append(nowstate)
        scoreMarix = AStarRewardatrix(env=self.env, player=self.player)
        Maintrategy.actionRewardList.append(scoreMarix)

        # Rewards collectable on the way home, damped so only clearly
        # worthwhile detours count.
        if homeDistance > 0:
            rewardNum = go_home_jobs(nowstate, home, self.env, self.player)
            # apply a decay value
            rewardNum = rewardNum - 2
            if rewardNum < 0:
                rewardNum = 0
        else:
            rewardNum = 0


        # While load is below the cap and we are not yet near the endgame:

        # try one greedy step first

        if jobs < 10:
            greedymove = OneMove(self.env, self.player, nowstate)
            if greedymove != "P":
                Maintrategy.Runnum = Maintrategy.Runnum + 1
                print("greedy move cost :" + str(time.time() - t))
                Maintrategy.actionList.append(greedymove)
                return greedymove

        # Keep exploring while there is load headroom, enough remaining turns
        # to get home, and we are either far from home or lightly loaded --
        # or we are standing on our own base.
        if (jobs < Config.ToTalJobs - rewardNum and (Config.RUNNUMMBERLIMIT - Maintrategy.Runnum) - homeDistance > 5) \
                and (homeDistance > 3 or jobs < 5) \
                or nowstate == home:

            deepth = int(Config.TREEDEEP / 2 - 1)
            # Sum the reward values of cells reachable within `deepth` moves.
            around = self.env.getAllLegalMoveAroud(nowstate, self.player, deepth)
            sumScore = 0
            for onePoint in around:
                if onePoint in self.env.reward_states.keys():
                    sumScore = sumScore + self.env.reward_states[onePoint]

            # When a loop is detected, take several consecutive escape moves.
            # (Original note mentioned "use Q for three steps"; the code below
            # escapes via the CruelGreed branch instead.)
            checkLoop = self.loopnum + self.checkloop()

            # When the local score is too low, give the answer directly
            # (NOTE(review): comment originally said A*, code calls blockValue).
            if sumScore < Config.Area_Socre_Low_Limit and checkLoop <= 0:
                action = blockValue(self.env, self.player)
                # action = AStarRewardListThink(Maintrategy.actionRewardList,
                #                               self.env.getAllLegalMove(nowstate, self.player))
                print("block Value  move cost :" + str(time.time() - t))


            elif checkLoop > 0 or oppentDistance > (Config.TREEDEEP / 2 + 1) or oppentjobs > 8: # when the opponent is near full load, skip the adversarial search

                action = CruelGreed(env = self.env,nowstate = nowstate ,player = self.player,deepth = deepth)
                # action = AStarReward(self.env, self.player)
                print("Cruel Greed move cost :" + str(time.time() - t))
                self.loopnum = self.loopnum - 1

            # Otherwise fall through to the alpha-beta game-tree search.
            else:

                node = Node(move=None, value=None, env=self.env, playturn=playturn, deepth=1, isTerminal=False,
                            selfPlay=selfPlay)
                action = alpha_beta_search(node)
                if action not in ["U", "D", "R", "L"]:
                    print("asdasdsadsad")

                print("search move cost :" + str(time.time() - t))

        # Load is high (or turns are running out): A* back home, grabbing a
        # reward along the way.
        else:

            aStar = AStar(startpoint=nowstate, endpoint=home, env=self.env, player=self.player)
            action, _ = aStar.action()
            print("A-star home  move cost :" + str(time.time() - t))

        Maintrategy.Runnum = Maintrategy.Runnum + 1
        Maintrategy.actionList.append(action)
        return action


    #192.168.1.112