import numpy as np
import pickle
import time
from board import board

def find_isomorphic_pattern(pattern):
    """Return the 8 symmetric variants of an index *pattern*.

    A reference board holding the cell indices 0..15 is mirrored
    (for variants 4-7) and rotated 0-3 times; reading the pattern's
    positions off each transformed board yields the index arrays for
    all 4 rotations x {identity, mirror} symmetries.
    """
    reference = board(list(range(16)))

    variants = []
    for i in range(8):
        # variants 4..7 start from the mirrored board, 0..3 from the original
        b = board(reference.mirror().tile) if i >= 4 else board(reference.tile)
        # then apply i % 4 quarter-turn rotations
        for _ in range(i % 4):
            b = b.rotate()
        variants.append(np.array(b.tile)[pattern])

    return variants

class TuplesNet():
    """N-tuple value network for one board pattern.

    A single weight table ``V`` is shared across the pattern's 8
    isomorphic variants (rotations and mirror images), so every board
    symmetry trains the same entries.
    """

    def __init__(self, pattern, maxnum):
        # weight table indexed by the tile codes at the pattern's cells
        self.V = np.zeros([maxnum] * len(pattern))
        self.pattern = pattern
        self.isomorphic_pattern = find_isomorphic_pattern(self.pattern)

    def getState(self, tile):
        """Map a board to one index tuple per isomorphic variant."""
        cells = np.array(tile)
        return [tuple(cells[p]) for p in self.isomorphic_pattern]

    def getValue(self, tile):
        """Return the summed table value over all isomorphic variants."""
        return sum(self.V[s] for s in self.getState(tile))

    def setValue(self, tile, v, reset=False):
        """Apply update *v*, split evenly across the isomorphic entries.

        Returns the new summed value of the touched entries
        (i.e. ``getValue(tile)`` after the update).
        """
        share = v / len(self.isomorphic_pattern)
        total = 0.0
        for s in self.getState(tile):
            self.V[s] += share
            total += self.V[s]
        return total


class Agent():
    """TD(0) learning agent for 2048 built from several n-tuple networks.

    The state value V(s) is the sum of the values reported by each
    TuplesNet pattern; updates are split evenly across the networks.
    """

    def __init__(self, patterns, maxnum):
        # one TuplesNet per pattern; they jointly define V(s)
        self.Tuples = []
        for p in patterns:
            self.Tuples.append(TuplesNet(p, maxnum))
        # per-episode stats: (score, episode length, final board tiles)
        self.metrics = []
        
    def getValue(self, tile):
        """Return V(s) for board `tile`: sum over all pattern networks."""
        V = [t.getValue(tile) for t in self.Tuples]
        
        # sum (not average) of all networks' values
        V = sum(V)
        #V = sum(V) / len(V)
        
        return V
    
    def setValue(self, tile, v, reset=False):
        """Distribute update `v` evenly over the networks; return the new V(s)."""
        v /= len(self.Tuples)
        V = 0.0
        for t in self.Tuples:
            V += t.setValue(tile, v, reset)
        return V
    
    # get all s'
    def evaulate(self, next_games):
        """Score candidate afterstates: r + V(s') for each (board, reward) pair."""
        # TD(0)-after
        #  r + V(s')
        return [ng[1] + self.getValue(ng[0].tile) for ng in next_games]
    
    def learn(self, records, lr):
        """TD(0) backup over one episode.

        `records` holds (s, a, r, s', s'') tuples ordered newest-first
        (see train()), so `exact` always carries the already-updated
        target r_next + V(s'_next) of the following move.
        """
        
        # terminal afterstate has value 0
        exact = 0.0
        
        # exact = records[0][2]
        # records[0:] ignore terminate, s' can't be s''
        
        # learn from end to begin
        # records = [end .... begin]
        # (s, a, r, s', s'')
        for s, a, r, s_, s__ in records: 
            # TD(0)-after
            
            # V(s') = V(s') + \alpha ( r_next + V(s'_next) - V(s') )
            error = exact - self.getValue(s_)
            exact = r + self.setValue(s_, lr*error)
            
            # from c++ implementation
            # V(s') = V(s) - r ?
            # error = exact - (self.getValue(s) - r)
            # exact = r + self.setValue(s_, lr*error)
            
    def showStattistic(self, epoch, unit, show=True):
        """Summarize the last `unit` episodes ending at `epoch`.

        Returns (mean score, max score) and, when the end-board column
        is present, also a per-tile (tile, reach %, end %) table.
        """
        metrics = np.array(self.metrics[epoch-unit:epoch])
        
        # get average score
        score_mean = np.mean(metrics[:, 0])
        # get max score
        score_max = np.max(metrics[:, 0])
        
        if show:
            print('{:<8d}mean = {:<8.0f} max = {:<8.0f}'.format(epoch, score_mean, score_max))
        
        # older metrics rows may lack the end-board column
        if (metrics.shape[1] < 3):
            return score_mean, score_max
        
        # all end game board
        end_games = metrics[:, 2]
        
        # largest tile value per game; assumes tiles are stored as exponents
        # (value = 1 << code) — TODO confirm against board. `& -2` clears
        # bit 0 so an empty board (max code 0) maps to 0, not 1.
        reach_nums = np.array([1<<max(end) & -2 for end in end_games])
                  
        if show:
            print('\n')
        
        score_stat = []
        
        for num in np.sort(np.unique(reach_nums)):
            # count how many games reached at least this tile
            reachs = np.count_nonzero(reach_nums >= num)
            reachs = (reachs*100)/len(metrics)
            
            # count how many games ended exactly at this tile
            ends = np.count_nonzero(reach_nums == num)
            ends = (ends*100)/len(metrics)
            
            if show:
                print('{:<5d}  {:3.1f} % ({:3.1f} %)'.format(num, reachs, ends) )
            
            score_stat.append( (num, reachs, ends) )
        
        score_stat = np.array(score_stat)
        
        return score_mean, score_max, score_stat
    
    def train(self, epoch_size, lr=0.1, showsize=1000, savesize=1000):
        """Self-play training loop up to `epoch_size` total episodes.

        Resumes from len(self.metrics); shows stats every `showsize`
        episodes and pickles the agent every `savesize` episodes.
        """
        start_epoch = len(self.metrics)
        for epoch in range(start_epoch, epoch_size):
            # init score and env (2048): empty board with two spawned tiles
            score = 0.0
            game = board().popup().popup()
            records = []
            while True:
                # choose action greedily by r + V(s')
                next_games = [game.up(), game.down(), game.left(), game.right()]
                action = np.argmax(self.evaulate(next_games))
                
                # do action
                # s'
                next_game, reward = next_games[action]
                
                # save record (s, a, r, s')
                # records.insert(0, (game.tile, action, reward, next_game.tile) )
                
                # game is same as before, end game
                #if reward == -1:
                if game.end():
                    break
                
                # s'': afterstate with a new random tile spawned
                next_game_after = next_game.popup()
                
                score += reward
                
                # save record (s, a, r, s', s''); insert(0, ...) keeps the
                # list newest-first, the order learn() relies on
                records.insert(0, (game.tile, action, reward, next_game.tile, next_game_after.tile) )
                # records.insert(0, (game.tile, action, score, next_game.tile, next_game_after.tile) )
                
                # s = s'' update state
                game = next_game_after
                
            #self.learn(records, lr / len(self.Tuples))
            self.learn(records, lr)
            
            # store score, game len, end game board
            self.metrics.append( (score, len(records), game.tile) )
            if (epoch+1) % showsize == 0:
                # clear_output(wait=True)
                self.showStattistic(epoch+1, showsize)
            
            if (epoch+1) % savesize == 0:
                saveAgent(self, './model/epoch-%d.pkl' % (epoch + 1))

            #if True:
                #print('[{:6d}] l : {:2d}, score : {:4.0f}, status : {}'.format(epoch, len(records), score, showstatus(game)))
    
    # use current state of game, return next game and action
    def play(self, game):
        """Greedy one-step policy: return (next board, reward, action name)."""
        next_games = [game.up(), game.down(), game.left(), game.right()]
        action = np.argmax(self.evaulate(next_games))
                
        next_game, reward = next_games[action]
        return next_game, reward, ['up', 'down', 'left', 'right'][action]


def saveAgent(agent, fileName):
    """Serialize *agent* to *fileName* with pickle; return the path written."""
    with open(fileName, 'wb') as handle:
        pickle.dump(agent, handle)
    return fileName


def loadAgent(fileName):
    """Deserialize and return an agent previously written by saveAgent.

    WARNING: pickle.load executes arbitrary code from the file — only
    load model files you created yourself.
    """
    with open(fileName, 'rb') as handle:
        return pickle.load(handle)