from random import *
from copy import *

import minimax

class AIAgentOneLookAhead:
    """A one-ply lookahead agent: plays an immediately winning move when one
    exists, otherwise a uniformly random legal move.

    The agent records every board state it sees in ``self.history`` and, after
    a game, files the undecided states into ``self.winStates`` /
    ``self.lossStates`` for later use.

    The ``board`` objects passed in are expected to provide ``checkWin()``
    (tuple whose first element is the winner's id, or 0 for no winner yet),
    ``notFullColumns()``, ``drop(column, id)`` and ``printMe()``.
    """

    def __init__(self, id):
        # id: this player's identifier, as reported by board.checkWin().
        self.id = id
        self.history = []      # every board state observed, in order
        self.winStates = []    # undecided states from games this agent won
        self.lossStates = []   # undecided states from games this agent lost

    def addToHistory(self, b):
        """Store a snapshot of board *b* (deep-copied so later moves don't mutate it)."""
        self.history.append(deepcopy(b))

    def evaluate(self, b):
        """Return 1.0 if this agent has won on board *b*, else 0.0."""
        if b.checkWin()[0] == self.id:
            return 1.0
        return 0.0

    def learnFromLatestGame(self):
        """Walk the last game's history backwards and file each still-undecided
        state as a win or loss example, depending on the final outcome.

        NOTE(review): any outcome other than a win for self.id (including a
        draw, if checkWin encodes one) is counted as a loss — confirm intended.
        """
        win = self.history[-1].checkWin()
        won = win[0] == self.id
        target = self.winStates if won else self.lossStates
        n = 0
        for b in reversed(self.history[:-1]):
            # Stop at the first already-decided state; only learn from the
            # undecided tail of the game.
            if b.checkWin()[0] != 0:
                break
            n += 1
            target.append(b)
        if won:
            print("Learned from %i winning states" % n)
        else:
            print("Ed from %i losing states" % n) if False else print("Learned from %i losing states" % n)

    def play(self, board):
        """Drop a piece on *board*: the first immediately winning column if one
        exists, otherwise a random legal column. Records the board state both
        before and after the move. Does nothing if the board is full.
        """
        self.addToHistory(board)
        actions = board.notFullColumns()
        if not actions:
            return
        # One-ply lookahead: try each legal column on a copy of the board and
        # take the first that wins outright.
        bestAction = None
        for a in actions:
            candidate = deepcopy(board)
            candidate.drop(a, self.id)
            if self.evaluate(candidate) > 0:
                bestAction = a
                break
        if bestAction is None:
            # No winning move available: fall back to a random legal column.
            bestAction = choice(actions)
        board.drop(bestAction, self.id)
        self.addToHistory(board)

    def printHistory(self):
        """Print every recorded board state via the board's own printMe()."""
        for b in self.history:
            b.printMe()