# myTeam.py
# ---------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html

from captureAgents import CaptureAgent
import random, time, util
from game import Directions
import game
import math
from copy import deepcopy


#################
# Team creation #
#################

def createTeam(firstIndex, secondIndex, isRed,
               first='MyAgentA', second='MyAgentB'):
    """
    This function should return a list of two agents that will form the
    team, initialized using firstIndex and secondIndex as their agent
    index numbers.  isRed is True if the red team is being created, and
    will be False if the blue team is being created.

    As a potentially helpful development aid, this function can take
    additional string-valued keyword arguments ("first" and "second" are
    such arguments in the case of this function), which will come from
    the --redOpts and --blueOpts command-line arguments to capture.py.
    For the nightly contest, however, your team will be created without
    any extra arguments, so you should make sure that the default
    behavior is what you want for the nightly contest.
    """

    # Resolve the class names through the module namespace instead of eval():
    # identical behavior for any agent class defined in this module, but the
    # command-line strings can no longer execute arbitrary expressions.
    return [globals()[first](firstIndex), globals()[second](secondIndex)]


##########
# Agents #
##########

class DummyAgent(CaptureAgent):
    """
    Minimal example agent showing the required structure: it performs the
    mandatory setup and then acts uniformly at random.  See baselineTeam.py
    for a more complete starting point.
    """

    def registerInitialState(self, gameState):
        """
        One-time setup run before the game starts (e.g. which team we are on).

        The superclass call below also builds a distanceCalculator cache of
        maze distances between every pair of positions, usable afterwards as
        self.distancer.getDistance(p1, p2).

        IMPORTANT: This method may run for at most 15 seconds.
        """
        # Do not delete this call.  If you would rather use Manhattan
        # distances to save initialization time, see
        # CaptureAgent.registerInitialState in captureAgents.py.
        CaptureAgent.registerInitialState(self, gameState)

        # Any additional initialization code goes here.

    def chooseAction(self, gameState):
        """
        Return a uniformly random legal action (placeholder behavior —
        replace this in a real agent).
        """
        legal = gameState.getLegalActions(self.index)
        return random.choice(legal)


class MyAgent(CaptureAgent):
    """
    Shared setup for both team agents: builds a "heat map" of the food to
    attack and splits the pellets into two clusters (self.foodClusterA /
    self.foodClusterB) so each agent can target one cluster.
    """

    w = None              # layout width
    h = None              # layout height
    return_points = None  # open border cells on our side where carried food is banked

    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)
        self.w = gameState.data.layout.width
        self.h = gameState.data.layout.height
        # Floor division keeps the column an int so it stays a valid grid
        # index on Python 3 (plain "/" would produce a float there).
        if self.red:
            return_column = self.w // 2 - 1
        else:
            return_column = self.w // 2
        walls = gameState.getWalls()
        self.return_points = [(return_column, y) for y in range(self.h)
                              if not walls[return_column][y]]
        foodLst = list(self.getFood(gameState).asList())

        # Heat map: every open cell accumulates max(0, 100 - d^2) per pellet
        # at maze distance d, so cells near many pellets get hot.
        heatMap = self._computeHeatMap(gameState, foodLst)

        # Remove the two coldest (most isolated) pellets so outliers do not
        # distort the cluster division below.  (The original compared an int
        # against an [x, y, heat] list here, so it never tracked true minima.)
        min1 = [0, 0, 999999]  # x, y, heat of the coldest pellet
        min2 = [0, 0, 999999]  # x, y, heat of the second-coldest pellet
        for (x, y) in foodLst:
            heat = heatMap[x][y]
            if heat < min1[2]:
                min2 = min1
                min1 = [x, y, heat]
            elif heat < min2[2]:
                min2 = [x, y, heat]
        if (min1[0], min1[1]) in foodLst:
            foodLst.remove((min1[0], min1[1]))
        if (min2[0], min2[1]) in foodLst:
            foodLst.remove((min2[0], min2[1]))

        # Recompute the heat map and its local maxima without the outliers.
        heatMap = self._computeHeatMap(gameState, foodLst)
        localMax = self._findLocalMaxima(gameState, heatMap)

        minDiff = 999999
        for _ in range(10):  # k-means is randomly seeded; keep the most balanced run
            clusters = kmeans(localMax, 2, 0.05)
            # Materialize these: on Python 3, map() returns a one-shot
            # iterator that cannot be indexed or re-iterated as the
            # original code did.
            localMaxPos = [tuple(p.coords) for p in localMax]
            clusterA = set(tuple(p.coords) for p in clusters[0].points)
            clusterB = set(tuple(p.coords) for p in clusters[1].points)
            foodClusterA = []
            foodClusterB = []
            for food in foodLst:
                dists = [self.getMazeDistance(food, locMax) for locMax in localMaxPos]
                nearestLocalMax = localMaxPos[dists.index(min(dists))]
                if nearestLocalMax in clusterA:
                    foodClusterA.append(food)
                elif nearestLocalMax in clusterB:
                    foodClusterB.append(food)
            diff = abs(len(foodClusterA) - len(foodClusterB))
            if diff < minDiff:
                minDiff = diff
                self.foodClusterA = list(foodClusterA)
                self.foodClusterB = list(foodClusterB)

        # Make the division deterministic: cluster A is the one containing the
        # top-most pellet (ties broken by left-most pellet).
        topA = max([food[1] for food in self.foodClusterA])
        topB = max([food[1] for food in self.foodClusterB])
        leftA = min([food[0] for food in self.foodClusterA])
        leftB = min([food[0] for food in self.foodClusterB])
        if topA < topB or (topA == topB and leftA > leftB):
            self.foodClusterA, self.foodClusterB = self.foodClusterB, self.foodClusterA

        for food in foodLst:
            if food in self.foodClusterA:
                self.debugDraw(food, [1, 0, 0])
            elif food in self.foodClusterB:
                self.debugDraw(food, [0, 0, 1])

        self.heatMap = heatMap

    def _computeHeatMap(self, gameState, foodLst):
        """Return a [w][h] grid: each open scanned cell sums max(0, 100 - d**2)
        over pellets at maze distance d.

        NOTE(review): the scan always covers x in [w//2, w) — the right half.
        For the blue team the food it attacks lives in the left half; confirm
        this range is intended before changing it.
        """
        heatMap = [[0 for _ in range(self.h)] for _ in range(self.w)]
        walls = gameState.getWalls()
        for food in foodLst:
            for x in range(self.w // 2, self.w):
                for y in range(self.h):
                    if not walls[x][y]:
                        d = self.getMazeDistance(food, (x, y))
                        heatMap[x][y] += max(0, 100 - d ** 2)
        return heatMap

    def _findLocalMaxima(self, gameState, heatMap):
        """Return Points at open cells with positive heat that are not beaten
        by any open 4-neighbour (ties count as maxima)."""
        walls = gameState.getWalls()
        localMax = []
        for x in range(self.w // 2, self.w):
            for y in range(self.h):
                if walls[x][y] or heatMap[x][y] <= 0:
                    continue
                ht = heatMap[x][y]
                if x - 1 >= 0 and not walls[x - 1][y] and ht < heatMap[x - 1][y]:
                    continue
                if x + 1 < self.w and not walls[x + 1][y] and ht < heatMap[x + 1][y]:
                    continue
                if y - 1 >= 0 and not walls[x][y - 1] and ht < heatMap[x][y - 1]:
                    continue
                if y + 1 < self.h and not walls[x][y + 1] and ht < heatMap[x][y + 1]:
                    continue
                localMax.append(Point([x, y]))
                # self.debugDraw([(x, y)], [0, 1, 0])
        return localMax

    def chooseAction(self, gameState):
        """Placeholder: subclasses override this; picks a random legal action."""
        actions = gameState.getLegalActions(self.index)
        if self.index in [0, 1]:  # Go for self.foodClusterA
            pass
        return random.choice(actions)


class MyAgentA(MyAgent):
    """Offensive agent that harvests food cluster A, then carries it home."""

    food_list = None  # pellets this agent is still responsible for
    heatMap = None    # per-cell attraction toward the remaining pellets

    def registerInitialState(self, gameState):
        MyAgent.registerInitialState(self, gameState)
        self.food_list = self.foodClusterA
        # Exponential weighting (2**(100-d)): being one step closer to any
        # pellet dominates any number of pellets one step farther away.
        self.heatMap = [[0 for _ in range(self.h)] for _ in range(self.w)]
        walls = gameState.getWalls()
        for food in self.food_list:
            for x in range(self.w):
                for y in range(self.h):
                    if not walls[x][y]:
                        d = self.getMazeDistance(food, (x, y))
                        self.heatMap[x][y] += 2 ** (100 - d)

    def chooseAction(self, gameState):
        """Greedy hill-climb on the heat map.  When the chosen step lands on
        one of our pellets, subtract that pellet's contribution so the map
        keeps tracking only the remaining food."""
        actions = gameState.getLegalActions(self.index)
        actions.remove('Stop')  # never stand still
        values = [self.evaluate(gameState, action) for action in actions]
        maxValue = max(values)
        bestActions = [a for a, v in zip(actions, values) if v == maxValue]
        best_action = bestActions[0]
        best_successor = gameState.generateSuccessor(self.index, best_action)
        best_position = best_successor.getAgentPosition(self.index)
        if best_position in self.food_list:
            self.food_list.remove(best_position)
            walls = gameState.getWalls()
            for x in range(self.w):
                for y in range(self.h):
                    if not walls[x][y]:
                        d = self.getMazeDistance((x, y), best_position)
                        self.heatMap[x][y] -= 2 ** (100 - d)
        return best_action

    def evaluate(self, gameState, action):
        """Score an action: heat at the successor cell while food remains;
        once the cluster is cleared, prefer the nearest border return point."""
        successor = gameState.generateSuccessor(self.index, action)
        position = successor.getAgentPosition(self.index)
        if self.food_list:
            return self.heatMap[position[0]][position[1]]
        return 100 - min(self.getMazeDistance(return_point, position)
                         for return_point in self.return_points)


class MyAgentB(MyAgent):
    """Offensive agent that harvests food cluster B, then carries it home."""

    food_list = None  # pellets this agent is still responsible for
    heatMap = None    # per-cell attraction toward the remaining pellets

    def registerInitialState(self, gameState):
        MyAgent.registerInitialState(self, gameState)
        self.food_list = self.foodClusterB
        # Exponential weighting (2**(100-d)): being one step closer to any
        # pellet dominates any number of pellets one step farther away.
        self.heatMap = [[0 for _ in range(self.h)] for _ in range(self.w)]
        walls = gameState.getWalls()
        for food in self.food_list:
            for x in range(self.w):
                for y in range(self.h):
                    if not walls[x][y]:
                        d = self.getMazeDistance(food, (x, y))
                        self.heatMap[x][y] += 2 ** (100 - d)

    def chooseAction(self, gameState):
        """Greedy hill-climb on the heat map.  When the chosen step lands on
        one of our pellets, subtract that pellet's contribution so the map
        keeps tracking only the remaining food."""
        actions = gameState.getLegalActions(self.index)
        actions.remove('Stop')  # never stand still
        values = [self.evaluate(gameState, action) for action in actions]
        maxValue = max(values)
        bestActions = [a for a, v in zip(actions, values) if v == maxValue]
        best_action = bestActions[0]
        best_successor = gameState.generateSuccessor(self.index, best_action)
        best_position = best_successor.getAgentPosition(self.index)
        if best_position in self.food_list:
            self.food_list.remove(best_position)
            walls = gameState.getWalls()
            for x in range(self.w):
                for y in range(self.h):
                    if not walls[x][y]:
                        d = self.getMazeDistance((x, y), best_position)
                        self.heatMap[x][y] -= 2 ** (100 - d)
        return best_action

    def evaluate(self, gameState, action):
        """Score an action: heat at the successor cell while food remains;
        once the cluster is cleared, prefer the nearest border return point."""
        successor = gameState.generateSuccessor(self.index, action)
        position = successor.getAgentPosition(self.index)
        if self.food_list:
            return self.heatMap[position[0]][position[1]]
        return 100 - min(self.getMazeDistance(return_point, position)
                         for return_point in self.return_points)


'''
The following code implements the k-means clustering algorithm,
adapted from an existing public implementation.
See the original: https://gist.github.com/iandanforth/5862470
'''


class Point(object):
    """A point in n-dimensional space, backed by a plain coordinate list."""

    def __init__(self, coords):
        # Dimensionality is simply the length of the coordinate list.
        self.n = len(coords)
        self.coords = coords

    def __repr__(self):
        return repr(self.coords)


class Cluster(object):
    """A k-means cluster: a non-empty list of Points plus their centroid."""

    def __init__(self, points):
        if len(points) == 0:
            # ValueError is more precise than a bare Exception and is still
            # caught by any existing `except Exception` handler.
            raise ValueError("ERROR: empty cluster")
        self.points = points
        self.n = points[0].n  # dimensionality, taken from the first point
        self.centroid = self.calculateCentroid()

    def __repr__(self):
        return str(self.points)

    def update(self, points):
        """Replace the member points and return how far the centroid moved."""
        old_centroid = self.centroid
        self.points = points
        self.centroid = self.calculateCentroid()
        shift = getDistance(old_centroid, self.centroid)
        return shift

    def calculateCentroid(self):
        """Coordinate-wise mean of the member points (math.fsum for accuracy)."""
        numPoints = len(self.points)
        coords = [p.coords for p in self.points]
        unzipped = zip(*coords)
        centroid_coords = [math.fsum(dList) / numPoints for dList in unzipped]
        return Point(centroid_coords)


def kmeans(points, k, cutoff):
    """Lloyd's k-means: partition `points` into k Clusters.

    Seeds with k distinct random points and iterates until no centroid moves
    by more than `cutoff`.  A cluster that loses all of its points keeps its
    previous centroid; the original code updated it with an empty list, which
    produced a zero-dimension centroid and crashed the next distance call.
    """
    initial = random.sample(points, k)
    clusters = [Cluster([p]) for p in initial]
    while True:
        lists = [[] for _ in clusters]
        clusterCount = len(clusters)
        # Assignment step: attach each point to its nearest centroid.
        for p in points:
            smallest_distance = getDistance(p, clusters[0].centroid)
            clusterIndex = 0
            for i in range(1, clusterCount):
                distance = getDistance(p, clusters[i].centroid)
                if distance < smallest_distance:
                    smallest_distance = distance
                    clusterIndex = i
            lists[clusterIndex].append(p)
        # Update step: move each centroid to the mean of its assigned points.
        biggest_shift = 0.0
        for i in range(clusterCount):
            if lists[i]:  # skip emptied clusters instead of computing a 0-point mean
                shift = clusters[i].update(lists[i])
                biggest_shift = max(biggest_shift, shift)
        if biggest_shift < cutoff:
            break
    return clusters


def getDistance(a, b):
    """Return the Euclidean distance between points *a* and *b*.

    Both arguments must expose `.coords` (indexable) and share dimension
    `a.n`; only the first a.n coordinates are compared.
    """
    return math.sqrt(sum((a.coords[i] - b.coords[i]) ** 2 for i in range(a.n)))
