from __future__ import division
from random import random

import numpy as np
import pandas as pd

class RPS:
    """Static definitions for Rock-Paper-Scissors.

    ``utilities.loc[a1, a2]`` is the payoff for the row player choosing
    ``a1`` against an opponent choosing ``a2``: 1 = win, 0 = draw, -1 = loss.
    """

    actions = [
        'ROCK',
        'PAPER',
        'SCISSORS'
    ]
    # Derive the count from the action list so the two can never drift apart.
    n_actions = len(actions)
    utilities = pd.DataFrame(
        [
            [0, -1, 1],
            [1, 0, -1],
            [-1, 1, 0]
        ],
        columns=actions,
        index=actions
    )

class Player:
    """One regret-matching player in repeated Rock-Paper-Scissors."""

    def __init__(self, name):
        self.name = name
        # Per-action vectors, all starting at zero:
        # current mixed strategy, learned average strategy,
        # running sum of strategies, and accumulated regrets.
        self.strategy = np.zeros(RPS.n_actions)
        self.avg_strategy = np.zeros(RPS.n_actions)
        self.strategy_sum = np.zeros(RPS.n_actions)
        self.regret_sum = np.zeros(RPS.n_actions)

    def set_strategy(self, strategy):
        """Replace the current mixed strategy outright."""
        self.strategy = strategy

    def __repr__(self):
        return self.name

    def update_strategy(self):
        """Recompute the strategy proportional to positive regrets.

        Falls back to the uniform distribution when no regret is
        positive, then accumulates the result into ``strategy_sum``.
        """
        positive = np.copy(self.regret_sum)
        positive[positive < 0] = 0

        total = positive.sum()
        if total > 0:
            self.strategy = positive / total
        else:
            self.strategy = np.full(RPS.n_actions, 1 / RPS.n_actions)

        self.strategy_sum += self.strategy

    def regret(self, my_action, opp_action):
        """Add this round's counterfactual regret for every action."""
        realized = RPS.utilities.loc[my_action, opp_action]
        alternatives = RPS.utilities.loc[:, opp_action].values
        # Regret = what each alternative would have earned minus what we got.
        self.regret_sum += alternatives - realized

    def action(self, use_avg=False):
        """Sample an action from the current (or learned average) strategy."""
        dist = self.avg_strategy if use_avg else self.strategy
        return np.random.choice(RPS.actions, p=dist)

    def learn_avg_strategy(self):
        """Normalize ``strategy_sum`` into the average strategy.

        The average strategy is what converges to a Nash equilibrium;
        an all-zero sum falls back to the uniform distribution.
        """
        total = self.strategy_sum.sum()
        if total > 0:
            self.avg_strategy = self.strategy_sum / total
        else:
            self.avg_strategy = np.full(RPS.n_actions, 1 / RPS.n_actions)
    
class Game:
    """Repeated Rock-Paper-Scissors between two regret-matching players."""

    def __init__(self, max_game=500):
        """Create both players and seed each with an initial mixed strategy.

        max_game: number of rounds played per call to ``play``.
        """
        self.p1 = Player('Alex')
        self.p2 = Player('Bob')
        self.max_game = max_game

        # Player preferences: p1 is biased toward SCISSORS, p2 is random.
        s1 = [0.1, 0.1, 0.8]
        s2 = np.random.random(RPS.n_actions)
        s2 = s2 / sum(s2)
        self.p1.set_strategy(s1)
        self.p2.set_strategy(s2)

    def winner(self, a1, a2):
        """Return the winning Player for p1's action a1 vs p2's a2, or 'Draw'."""
        rst = RPS.utilities.loc[a1, a2]
        if rst == 1:
            return self.p1
        elif rst == -1:
            return self.p2
        else:
            return 'Draw'

    def play(self, avg_regret_matching=False):
        """Play ``max_game`` rounds and print the win tally.

        With ``avg_regret_matching=False`` both players adapt each round
        via regret matching; with ``True`` both play their fixed learned
        average strategies (call ``conclude`` first so they are learned).
        """

        def play_regret_matching():
            # Adapt each player's strategy from accumulated regrets,
            # then sample actions and record this round's regrets.
            for i in range(0, self.max_game):
                self.p1.update_strategy()
                self.p2.update_strategy()
                a1 = self.p1.action()
                a2 = self.p2.action()
                # Each player accumulates regret from its own perspective.
                self.p1.regret(a1, a2)
                self.p2.regret(a2, a1)

                w = self.winner(a1, a2)
                num_wins[w] += 1

        def play_avg_regret_matching():
            # Both players commit to their learned average strategies;
            # no further learning happens here.
            for i in range(0, self.max_game):
                a1 = self.p1.action(use_avg=True)
                a2 = self.p2.action(use_avg=True)

                winner = self.winner(a1, a2)
                num_wins[winner] += 1
            print(f'{self.p1.name} learned avg_strategy:\n{self.p1.avg_strategy}')
            print(f'{self.p2.name} learned avg_strategy:\n{self.p2.avg_strategy}')

        num_wins = {
            self.p1: 0,
            self.p2: 0,
            'Draw': 0
        }

        if avg_regret_matching:
            play_avg_regret_matching()
        else:
            play_regret_matching()
        print(num_wins)

    def conclude(self):
        """Learn and print each player's average strategy (approximate NE)."""
        self.p1.learn_avg_strategy()
        self.p2.learn_avg_strategy()
        print(f'{self.p1.name} learned avg_strategy:\n{self.p1.avg_strategy}')
        print(f'{self.p2.name} learned avg_strategy:\n{self.p2.avg_strategy}')


def main():
    """Train via regret matching, then replay using the learned averages."""
    game = Game(2000)
    game.play()

    game.conclude()
    game.play(avg_regret_matching=True)


if __name__ == "__main__":
    main()
