#include "AI.h"
#include <iostream>
#include <cstring>
#include <cstdlib>
#include <ctime>
#include <sys/time.h>

using namespace std;

// Step size for each weight-update pass in learn_online().
#define learning_rate 0.00001
// Decay factor for the weighted average of future position values (TD-lambda style).
#define Lambda 0.4
// Number of gradient steps taken toward the target per position.
#define MAX_CONVERGE_STEP 10
// Bonus magnitude added to a decided game's terminal value so that any win
// outranks any draw-ish evaluation. Parenthesized so the macro expands safely
// inside larger expressions (TERMINAL_DIFF_WEIGHT is assumed to be a constant
// defined in AI.h -- TODO confirm).
#define WIN_OFFSET (TERMINAL_DIFF_WEIGHT * 4)

// Probability of the learner playing a random move (currently disabled).
const float RANDOM_CHANCE = 0;

// Position after each move of the most recent game, indexed by stage (ply).
gameState gs_history[NUM_STAGES];

// Print every recorded position of the most recent game, in move order.
void printBoardHistory(int max_stages)
{
    int stage = 0;
    while (stage < max_stages)
    {
        printGameState(gs_history[stage], BLOCKED_MAP, 1);
        ++stage;
    }
}

// Run one TD-style learning pass over the game recorded in gs_history.
//
// max_stages  number of positions recorded (assumes max_stages > MAX_BACKUP
//             so the terminal fill and the backward sweep stay in bounds --
//             TODO confirm callers guarantee this)
// MAX_BACKUP  how many future position values feed each target
//
// The final MAX_BACKUP positions are valued by the terminal disc difference
// (with a win/loss offset); every earlier position is nudged toward a
// Lambda-weighted average of the next MAX_BACKUP values, then re-evaluated
// to provide the value used by still-earlier positions.
void learn_online(int max_stages, int MAX_BACKUP)
{
    float value[NUM_STAGES];

    // Terminal value is the final disc difference, scaled.
    int disc_diff = evaluatePieceDiff(gs_history[max_stages - 1]);
    int terminal_value = disc_diff * TERMINAL_DIFF_WEIGHT;

    // Push decided games further from zero so a win always dominates.
    if (terminal_value > 0)
        terminal_value += WIN_OFFSET;
    else if (terminal_value < 0)
        terminal_value -= WIN_OFFSET;

    // The last MAX_BACKUP positions all take the terminal value directly.
    for (int i = 1; i < MAX_BACKUP + 1; i++)
    {
        value[max_stages - i] = terminal_value;
    }

    // Sweep backward over the remaining positions.
    for (int stage = max_stages - MAX_BACKUP - 1; stage >= 0; stage--)
    {
        Feature_Values feature_value = GetFeatureValue(gs_history[stage]);

        // Target = Lambda-weighted average of the next MAX_BACKUP values.
        // (renamed from 'exp', which shadowed std::exp under
        // 'using namespace std')
        float target = 0;
        float divisor = 0;
        float lambda_pow = Lambda;
        for (int k = 1; k <= MAX_BACKUP; k++)
        {
            target += value[stage + k] * lambda_pow;
            divisor += lambda_pow;
            lambda_pow = lambda_pow * Lambda;
        }
        target = target / divisor;

        // Take several small gradient steps toward the target: each step
        // moves every stage-specific weight along its feature value scaled
        // by the temporal difference and the learning rate.
        for (int i = 0; i < MAX_CONVERGE_STEP; i++)
        {
            float result = evaluateGameL(feature_value, stage);
            float diff = (target - result) * learning_rate;

            float * w = &(WEIGHTS[stage].disc_feature[0]);
            int * fv = &(feature_value.disc_feature[0]);
            for (int k = 0; k < NUM_FEATURES; k++)
            {
                w[k] = w[k] + fv[k] * diff;
            }
        }
        // Record this position's post-update evaluation for earlier stages.
        value[stage] = evaluateGameL(feature_value, stage);
    }
}

// Play max_games full games of the search agent against a uniformly random
// mover and report the win counts. The agent plays white for the first
// ceil(max_games/2) games, black for the rest.
//
// test_depth     search depth passed to chooseMove
// test_end_game  end-game solver threshold passed to chooseMove
// max_games      total number of games to play
void testAgentAgainstRandom(int test_depth, int test_end_game, int max_games)
{
    int countWhiteWin = 0;
    int countBlackWin = 0;

    // First half (rounded up) of the games: agent is white.
    int cutoff = (max_games + 1) / 2;
    for (int i = 0; i < max_games; i++)
    {
        BoardState agent_color;
        if (i < cutoff)
        {
            agent_color = WHITE;
        }
        else
        {
            agent_color = BLACK;
        }

        Board board;

        int _moves[100];
        BitBoard _effect[100];
        int stage = 0;
        BoardState player = WHITE;

        // Play until neither side has a legal move.
        // (removed dead '_gameEnd' flag: it was never set, the loop only
        // exits via the break below)
        while (true)
        {
            gameState gs = parseBoard(board, BLOCKED_MAP);

            int mCount = getMoves(gs, BLOCKED_MAP, player, _moves, _effect);

            if (mCount != 0)
            {
                int m;
                if (player == agent_color)
                {
                    m = chooseMove(board, player, test_depth, test_end_game);
                }
                else
                {
                    // Opponent picks uniformly among its legal moves.
                    int random_i = rand() % mCount;
                    m = _moves[random_i];
                }
                // Moves are encoded as y*10 + x.
                int y = m / 10;
                int x = m % 10;
                if (!board.makeMove(x, y, player))
                {
                    cout << "move " << x << ", " << y << " illegal\n";
                    return;
                }

                player = player == BLACK ? WHITE : BLACK;

                gs_history[stage] = parseBoard(board, BLOCKED_MAP);
                stage++;
            }
            else
            {
                // Current player must pass; game ends when the opponent
                // also has no move.
                player = player == BLACK ? WHITE : BLACK;
                mCount = evaluateMobility(gs, player);
                if (mCount == 0)
                    break;
            }
        }

        gameState gs = parseBoard(board, BLOCKED_MAP);

        // Positive disc difference means white won -- TODO confirm the
        // sign convention of evaluatePieceDiff.
        if (agent_color == WHITE)
        {
            if (evaluatePieceDiff(gs) > 0)
            {
                countWhiteWin++;
            }
        }
        else
        {
            if (evaluatePieceDiff(gs) < 0)
            {
                countBlackWin++;
            }
        }

        cout << i + 1 << " games finished" << endl;
    }

    cout << "agent wins " << countWhiteWin << "/" << cutoff << " as white and ";
    cout << countBlackWin << "/" << max_games - cutoff << " as black" << endl;
    cout << "total " << countWhiteWin + countBlackWin << "/" << max_games << endl;
}

// Play max_games games of a depth1 agent against a depth2 agent and report
// how often the depth1 agent wins. The depth1 agent plays white for the
// first ceil(max_games/2) games, black for the rest.
//
// depth1, depth2  search depths for the two agents
// max_games       total number of games to play
void testAgentAgainstSelf(int depth1, int depth2, int max_games)
{
    int d1w = 0; // depth1-agent wins as white
    int d1b = 0; // depth1-agent wins as black

    int cutoff = (max_games + 1) / 2;

    // Both agents switch to the end-game solver with 8 empty squares left.
    const int end_game = 8;

    for (int i = 0; i < max_games; i++)
    {
        BoardState d1color;
        if (i < cutoff)
        {
            d1color = WHITE;
        }
        else
        {
            d1color = BLACK;
        }

        Board board;

        int _moves[100];
        BitBoard _effect[100];
        int stage = 0;
        BoardState player = WHITE;

        // Play until neither side has a legal move. (Removed an unused
        // '_gameEnd' flag and a leftover debug probe that called
        // evaluateStabilityDiff at stage 88 and discarded the result.)
        while (true)
        {
            gameState gs = parseBoard(board, BLOCKED_MAP);

            int mCount = getMoves(gs, BLOCKED_MAP, player, _moves, _effect);

            if (mCount != 0)
            {
                int m;
                if (player == d1color)
                {
                    m = chooseMove(board, player, depth1, end_game);
                }
                else
                {
                    m = chooseMove(board, player, depth2, end_game);
                }
                // Moves are encoded as y*10 + x.
                int y = m / 10;
                int x = m % 10;
                if (!board.makeMove(x, y, player))
                {
                    cout << "move " << x << ", " << y << " illegal\n";
                    return;
                }

                player = player == BLACK ? WHITE : BLACK;

                stage++;
            }
            else
            {
                // Current player passes; game over if the opponent also
                // has no move.
                player = player == BLACK ? WHITE : BLACK;
                mCount = evaluateMobility(gs, player);
                if (mCount == 0)
                    break;
            }
        }

        gameState gs = parseBoard(board, BLOCKED_MAP);

        // Positive disc difference means white won -- TODO confirm the
        // sign convention of evaluatePieceDiff.
        if (d1color == WHITE)
        {
            if (evaluatePieceDiff(gs) > 0)
            {
                d1w++;
            }
        }
        else
        {
            if (evaluatePieceDiff(gs) < 0)
            {
                d1b++;
            }
        }

        cout << i + 1 << " games finished" << endl;
    }

    cout << "agent depth " << depth1 << ":\n";
    cout << "wins " << d1w << "/" << cutoff << " as white and ";
    cout << d1b << "/" << max_games - cutoff << " as black" << endl;
    cout << "total " << d1w + d1b << "/" << max_games << endl;
}

// Train the evaluation weights by self-play: play a full game, record every
// position in gs_history, then run learn_online over the history and persist
// the updated weights. Repeats for max_learn games.
//
// learn_depth     search depth used while generating training games
// learn_end_game  end-game solver threshold for chooseMove; NOTE(review):
//                 also reused as learn_online's MAX_BACKUP depth -- looks
//                 intentional (back up over the solved end-game plies) but
//                 worth confirming
// max_learn       number of training games to play
void trainAgent(int learn_depth, int learn_end_game, int max_learn)
{
    int game_count = 0;

    while (game_count < max_learn)
    {
        Board board;

        int _moves[100], stage;
        BitBoard _effect[100];

        stage = 0;
        BoardState player = WHITE;

        // Play one complete self-play game, recording each position.
        // (removed dead '_gameEnd' flag: it was never set)
        while (true)
        {
            gameState gs = parseBoard(board, BLOCKED_MAP);

            int mCount = getMoves(gs, BLOCKED_MAP, player, _moves, _effect);

            if (mCount != 0)
            {
                int _m = chooseMove(board, player, learn_depth, learn_end_game);

                // Moves are encoded as y*10 + x.
                int y = _m / 10;
                int x = _m % 10;
                if (!board.makeMove(x, y, player))
                {
                    cout << "move " << x << ", " << y << " illegal\n";
                    return;
                }

                gs_history[stage] = parseBoard(board, BLOCKED_MAP);
                stage++;

                player = player == BLACK ? WHITE : BLACK;
            }
            else
            {
                // Current player passes; game over if the opponent also
                // has no move.
                player = player == BLACK ? WHITE : BLACK;
                mCount = evaluateMobility(gs, player);
                if (mCount == 0)
                    break;
            }
        }
        // Learn from the recorded history. Guard against an empty history,
        // which would make learn_online index gs_history[-1].
        if (stage > 0)
            learn_online(stage, learn_end_game);

        // Persist the updated weights after every game.
        writeWeights();

        // Make a backup every 100 games (including after the first game,
        // since game_count is still 0 here).
        if ((game_count % 100) == 0)
        {
            char back_up[] = "weights_back.bin";
            writeWeights(back_up);
        }

        game_count++;
        cout << game_count << " games finished" << endl;
    }
}

// Entry point. Modes (argv[1]): 't' = train, 's' = self-play test,
// 'r' = test against a random mover. Remaining args: n (game count),
// depth1, depth2 (meaning depends on mode -- for 't' and 'r', depth2 is
// the end-game threshold).
int main(int argc, char * argv[])
{
    initialiseAI();

    // Seed rand() from wall-clock seconds XOR microseconds so back-to-back
    // runs get different opponents.
    timeval tp;
    gettimeofday(&tp, NULL);
    srand(tp.tv_sec ^ tp.tv_usec);

    int n = 1;
    int depth1 = 1;
    int depth2 = 6;

    if (argc == 1)
    {
        // fixed typo: "leaner" -> "learner"
        cout << "Usage: learner [t|s|r] n depth1 depth2" << endl;
        cout << "t = train, s = test by self-play, r = test against random" << endl;
        cout << "n = total number of games to train/test" << endl;
    }
    else
    {
        if (argc >= 3)
        {
            n = atoi(argv[2]);
        }
        if (argc >= 4)
            depth1 = atoi(argv[3]);
        if (argc >= 5)
            depth2 = atoi(argv[4]);
        if (argv[1][0] == 's')
        {
            // Default the second agent to depth 1 when no depth was given.
            if (argc < 4)
                depth2 = 1;
            testAgentAgainstSelf(depth1, depth2, n);
        }
        else if (argv[1][0] == 't')
        {
            trainAgent(depth1, depth2, n);
        }
        else if (argv[1][0] == 'r')
        {
            testAgentAgainstRandom(depth1, depth2, n);
        }
    }
    return 0;
}
