#include <iostream>
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <ctime>
#include <sys/time.h>
#include "AI.h"
#include "evaluator.h"
#include "MPC.h"

using namespace std;

/* learning rate for the gradient-descent weight updates */
const float alpha = 1e-4;
/* decay factor used when averaging future stage values into a TD(lambda) target */
const float lambda = 0.8f;
/* probability of playing a random (exploratory) move during training games */
const float epsilon = 0.2f;

/* offset added to decided games' terminal scores so that wins/losses
   are pushed well away from draws                                    */
const int WIN_OFFSET = TERMINAL_DIFF_WEIGHT * 50;
/* targets whose magnitude exceeds BOUND are excluded from learning */
const int BOUND = TERMINAL_DIFF_WEIGHT * 150;
/* maximum number of games recorded per training batch */
const int BATCH_SIZE = 10000;
/* number of final stages that share the terminal value; also the
   TD look-ahead horizon when building targets                       */
const int N = 6;
/* cap on gradient-descent iterations per stage before giving up on convergence */
const int MAX_BATCH_CONVERGE = 100;

/* Global variables */

/* data structure used to record played games:
   per game, one board snapshot and the player who moved, per stage */
gameState boards[BATCH_SIZE][NUM_STAGES];
BitBoard blocked[BATCH_SIZE];
BoardState turn[BATCH_SIZE][NUM_STAGES];

/* normalise each feature
   (not referenced in this file — presumably used elsewhere; TODO confirm) */
float Normaliser[NUM_FEATURES];



/* Function declarations */

/* calculate the sum of squared errors between the prediction based on current
   features and the target values using specified weights                     */
double SumOfSquaredError(Feature_Values features[], float targets[], Weights weights, int batch_count);

/* train the agent using batch playing (number of games specified by max_count)
   games are played between two players of uneven strength, specified by 
   depth1 and depth2, which are each player's search depth                    */
void trainAgentBatch(int depth1, int depth2, int max_count);

/* learn from the played games by minimising the sum of squared error between
   each prediction and target values (found using TD-lambda)                  */
void learn_batch(int batch_count);

/* play completely random games and record them into the global
   boards/turn/blocked arrays                                                 */
void playRandomGames(int max_count);

/* perform MPC search on the random board records and save the input/target 
   pairs to specified file                                                    */
void MPCSearch(int max_count);



/* Function definition */

/* calculate the inverse of the feature vector */
/* calculate the inverse of the feature vector: every non-zero feature x
   maps to 1 / (count * x), where count is the number of non-zero features,
   so that inv dotted with the feature vector sums to 1; zeros map to 0    */
void inverse(Feature_Values features, float inv[])
{
    const int * vals = (int*) &features;

    /* count how many features are non-zero */
    float nonZero = 0;
    for (int k = 0; k < NUM_FEATURES; k++)
        if (vals[k] != 0)
            nonZero += 1;

    /* fill in the (scaled) reciprocal of each non-zero entry */
    for (int k = 0; k < NUM_FEATURES; k++)
        inv[k] = (nonZero != 0 && vals[k] != 0) ? 1 / (nonZero * vals[k]) : 0;
}

/* calculate the sum of squared errors between the prediction based on current
   features and the target values using specified weights                     */
/* calculate the sum of squared errors between the prediction based on current
   features and the target values using specified weights                     */
double SumOfSquaredError(Feature_Values features[], float targets[], Weights weights, int batch_count)
{
    double total = 0;
    for (int game = 0; game < batch_count; game++)
    {
        /* skip positions whose target falls outside the trusted range */
        if (absolute(targets[game]) > BOUND)
            continue;

        double residual = targets[game] - evaluateGameL(features[game], weights);
        total += residual * residual;
    }
    return total;
}

/* calculate the mean error between the prediction based on current
   features and the target values using specified weights                     */
/* calculate the mean error between the prediction based on current
   features and the target values using specified weights.
   NOTE: out-of-bound targets are skipped in the sum but still counted
   in the divisor, so they effectively contribute a zero error —
   presumably intentional (mean over the whole batch); confirm.        */
double MeanError(Feature_Values features[], float targets[], Weights weights, int batch_count)
{
    // guard against division by zero on an empty batch
    if (batch_count <= 0)
        return 0;

    double sum = 0;
    for (int i = 0; i < batch_count; i++)
    {
        if (absolute(targets[i]) > BOUND)
            continue;
        double eval = evaluateGameL(features[i], weights);
        sum += targets[i] - eval;
    }
    return sum / batch_count;
}

/* learn from the played games by minimising the sum of squared error between
   each prediction and target values (found using TD-lambda)                  */
/* learn from the played games by minimising the sum of squared error between
   each prediction and target values (found using TD-lambda).
   Reads the global boards/turn/blocked records and updates WEIGHTS in place. */
void learn_batch(int batch_count)
{   
    float values[BATCH_SIZE][NUM_STAGES];
    
    float errorMean = 0.0f;
    
    // assign terminal values: scaled piece difference, pushed away from zero
    // by WIN_OFFSET so decided games separate clearly from draws
    for (int game = 0; game < batch_count; game++)
    {
        float terminal_value = evaluatePieceDiff(boards[game][NUM_STAGES-1]) * TERMINAL_DIFF_WEIGHT;
        if (terminal_value > 0)
            terminal_value += WIN_OFFSET;
        else if (terminal_value < 0)
            terminal_value -= WIN_OFFSET;
            
        // the last N stages all share the terminal value
        for (int stage = NUM_STAGES - 1; stage >= NUM_STAGES - N; stage--)
        {
            values[game][stage] = terminal_value;
        }
    }
    
    // learn weights through back propagation, weights for
    // the last few stages are ignored (solved by brute force)
    for (int stage = NUM_STAGES - N - 1; stage >= 0; stage--)
    {
        Feature_Values features[BATCH_SIZE];
        float targets[BATCH_SIZE];
        
        // compute targets and feature vector for each game
        for (int game = 0; game < batch_count; game++)
        {
            initialiseBoardSpecific(blocked[game]);
            features[game] = GetFeatureValue(boards[game][stage], turn[game][stage], blocked[game]);
            
            // target is the lambda-weighted average of the next N stage values
            targets[game] = 0;
            float divisor = 0;
            float decay = lambda;
            for (int n = 1; n <= N && stage+n < NUM_STAGES; n++)
            {    
                targets[game] += values[game][stage+n] * decay;
                divisor += decay;
                decay *= lambda;
            }
            targets[game] /= divisor;   
        }
        
        Weights newWeights, oldWeights;
        newWeights = WEIGHTS[stage];
        float * w = (float*) &newWeights;
        
        for (int i = 0; i < MAX_BATCH_CONVERGE; i++)
        {
            oldWeights = newWeights;
            
            // one gradient-descent pass over the whole batch
            for (int game = 0; game < batch_count; game++)
            {
                if (absolute(targets[game]) > BOUND)
                    continue;
                    
                float eval = evaluateGameL(features[game], newWeights);
                float diff = (targets[game] - eval) * alpha;
                
                float inv[NUM_FEATURES];
                inverse(features[game], inv);
                for (int k = 0; k < NUM_FEATURES; k++)
                    w[k] += diff * inv[k];         
            }
            
            Weights change = newWeights - oldWeights;
            float * delta = (float*) &change;
            
            
            // group speculative jumping: keep doubling the step along the
            // whole delta vector while the total error keeps dropping
            double errorNew = SumOfSquaredError(features, targets, newWeights, batch_count);
            double errorOld = errorNew + 10000000;
            int step = 1;
            while (errorOld > errorNew)
            {
                errorOld = errorNew;
                for (int k = 0; k < NUM_FEATURES; k++)
                    w[k] += delta[k] * step;
                errorNew = SumOfSquaredError(features, targets, newWeights, batch_count);
                step *= 2;
            }
            // undo the last (overshooting) jump
            step /= 2;
            for (int k = 0; k < NUM_FEATURES; k++)
                w[k] -= delta[k] * step;   
            
            // individual speculative jumping: same doubling trick,
            // applied to one weight at a time
            for (int k = 0; k < NUM_FEATURES; k++)
            {                  
                double errorNew = SumOfSquaredError(features, targets, newWeights, batch_count);
                double errorOld = errorNew + 1000000;
                float jump = 1;
                while (errorOld > errorNew)
                {
                    errorOld = errorNew;
                    w[k] += delta[k] * jump;
                    errorNew = SumOfSquaredError(features, targets, newWeights, batch_count);
                    jump *= 2;
                }
                jump /= 2;
                w[k] -= delta[k] * jump;
            }
                
            if (Converged(SumOfSquaredError(features, targets, newWeights, batch_count), 1e-3))
            {
                //cerr << "stage " << stage << " converged" << endl;
                break;
            }
        }
        
        WEIGHTS[stage] = newWeights;
        
        //cerr << "error mean: " << MeanError(features, targets, newWeights, batch_count) << endl;
        errorMean += absolute(MeanError(features, targets, newWeights, batch_count));
        
        // store the learned evaluation as this stage's value, used as the
        // target source for the next (earlier) stage
        for (int game = 0; game < batch_count; game++)
        {
            values[game][stage] = evaluateGameL(features[game], newWeights);
        }
    }
    
    // only NUM_STAGES - N stages were actually trained, so average over that
    // count (the original divided by NUM_STAGES, understating the mean error)
    cerr << " error mean = " << errorMean / (NUM_STAGES - N) << endl;
}

/* train the agent using batch playing (number of games specified by max_batch)
   games are played between two players of uneven strength, specified by 
   depth1 and depth2, which are each player's search depth                    */
void trainAgentBatch(int depth1, int depth2, int max_batch)
{
    int game_count = 0;
    
    while (game_count < max_batch)
    {
        
        Board board;
            
        bool _gameEnd = false;
        int _moves[100];
        BitBoard _effect[100];
     
        int stage = 0;
        BoardState player = WHITE;
        BoardState weak_player;
        
        if (game_count % 2 == 0)
            weak_player = WHITE;
        else
            weak_player = BLACK;
            
        while (!_gameEnd)
        {
            gameState gs = parseBoard(board, blocked[game_count]);
                
            int mCount = getMoves(gs, blocked[game_count], player, _moves, _effect);
              
                
            if (mCount != 0)
            {

                int _m; 
                    
                // choose a random move with posibility epsilon
                if (rand() % 10000 / 10000.0f < epsilon && stage < 15)
                    _m = _moves[rand()%mCount];
                else if (player == weak_player)
                    _m = chooseMove(board, player, depth1, 6);
                else
                    _m = chooseMove(board, player, depth2, 6);
                        
                // apply move and record consequent bit board
                int y = _m / 10;
                int x = _m % 10;
                if (!board.makeMove(x, y, player))
                {
                    cerr << "move " << x << ", " << y << " illegal\n";
                    return;
                }
                    
                boards[game_count][stage] = parseBoard(board, blocked[game_count]);
                turn[game_count][stage] = player;
                    
                stage++;
                    
                player = player == BLACK ? WHITE : BLACK;    
            }
            else
            {
                player = player == BLACK ? WHITE : BLACK;
                mCount = getMoves(gs, blocked[game_count], player, _moves, _effect);
                if (mCount == 0)
                    break;       
            }
        }
            // only record fully terminated game
        if (stage >= NUM_STAGES)
        {
            game_count++;  
            //cerr << game_count << " games finished" << endl;
        }
    }
    cerr << game_count << " games finished";
    learn_batch(max_batch); 
}


/* let two agents using different weights play n games as each color against 
   each other, return number of games agent1 wins (or loses if result is negative */
/* let two agents using different weights play n games as each color against 
   each other, return number of games agent1 wins (or loses if result is negative */
int testAgent(Weights agent1[], Weights agent2[], int n, int depth, int end_game = 6)
{
    int result = 0;
    
    for (int game_count = 0; game_count < 2*n; game_count++)
    {
        Board board;
        
        BoardState player = WHITE;
        // agent1 takes white on even games, black on odd games
        BoardState agent1color = (game_count % 2 == 0) ? WHITE : BLACK;
         
        // play one game until both sides are out of moves
        while (true)
        {
            BitBoard blocks;
            gameState gs = parseBoard(board, blocks);
            
            int temp;
            if (evaluateMobility(gs, player, blocks, temp) == 0)
            {
                // current player passes; if the opponent cannot move
                // either, the game is finished
                player = player == BLACK ? WHITE : BLACK;
                int other;
                if (evaluateMobility(gs, player, blocks, other) == 0)
                    break;
                continue;
            }
            
            // install the weights belonging to the side to move
            if (player == agent1color)
                SetWeights(agent1);
            else
                SetWeights(agent2);
                
            int m = chooseMove(board, player, depth, end_game);
                    
            // apply the chosen move and hand the turn over
            int y = m / 10;
            int x = m % 10;
            if (!board.makeMove(x, y, player))
            {
                cerr << "move " << x << ", " << y << " illegal\n";
                return 0;
            }                  
            player = player == BLACK ? WHITE : BLACK; 
        }
        
        // determine the winner from the final piece difference
        BoardState winner = EMPTY;
        int pieceDiff = board.cellCount(WHITE) - board.cellCount(BLACK);
        if (pieceDiff > 0)
            winner = WHITE;
        else if (pieceDiff < 0)
            winner = BLACK;
        
        if (winner == agent1color)
            result++;
        else if (winner != EMPTY)
            result--;
    }
    
    return result;
}

/* play completely random games and record them */
void playRandomGames(int max_count)
{
    int game_count = 0;
    
    while (game_count < max_count)
    {
        Board board;
        int _moves[100];
        BitBoard _effect[100];
     
        int stage = 0;
        BoardState player = WHITE;
            
        while (true)
        {
            gameState gs = parseBoard(board, blocked[game_count]);
                
            int mCount = getMoves(gs, blocked[game_count], player, _moves, _effect);
                  
            if (mCount != 0)
            {
                // choose a random move
                int _m = _moves[rand()%mCount];
                        
                // apply move and record consequent bit board
                int y = _m / 10;
                int x = _m % 10;
                if (!board.makeMove(x, y, player))
                {
                    cerr << "move " << x << ", " << y << " illegal\n";
                    return;
                }
                    
                boards[game_count][stage] = parseBoard(board, blocked[game_count]);
                turn[game_count][stage] = player;
                    
                stage++;
                    
                player = player == BLACK ? WHITE : BLACK;    
            }
            else
            {
                player = player == BLACK ? WHITE : BLACK;
                mCount = getMoves(gs, blocked[game_count], player, _moves, _effect);
                if (mCount == 0)
                    break;       
            }
        }
        // only record fully terminated game
        if (stage >= NUM_STAGES)
        {
            game_count++;
        }
    }
    cerr << game_count << " games finished\n";
}

/* perform MPC search on the random board records and save the input/target 
   pairs to specified file                                                    */
   
/*
gameState boards[BATCH_SIZE][NUM_STAGES];
BitBoard blocked[BATCH_SIZE];
BoardState turn[BATCH_SIZE][NUM_STAGES];
*/

void MPCSearch(int max_count)
{
    /* run the MPC search over each recorded game's board/turn history */
    for (int game = 0; game < max_count; game++)
        PerformSearch(boards[game], blocked[game], turn[game]);
}



/* entry point: dispatch on the first command-line option
   (b = batch TD learning, p = play-off vs backup weights,
    s = MPC search data generation, f = fit MPC data, t = benchmark test) */
int main(int argc, char * argv[])
{       
    initialiseAI();
    
    // seed both PRNG families (drand48 and rand) from the wall clock
    timeval tp, tp2;
    gettimeofday(&tp, NULL);
    srand48(tp.tv_sec ^ tp.tv_usec);
    srand(tp.tv_sec ^ tp.tv_usec);
    
    int n = 1;
    int depth1 = 1;
    int depth2 = 2;    // matches the "default = 2" stated in the usage text
    
    if (argc == 1)
    {
        cout << "Usage: learner option [n] [depth1] [depth2]" << endl;
        cout << "options:\n";
        cout << "      b - batch TD learning by self-playing n games" << endl;
        cout << "      p - play 100 games against the backup weights and store the winner" << endl;
        cout << "      s - perform MPC search for n games and append data pair to file" << endl;
        cout << "      f - fit the MPC data points stored in file" << endl;
        cout << "      t - test against benchmark player" << endl;
        cout << "param:\n";
        cout << "      n - total number of games to play" << endl;
        cout << " depth1 - search depth of the shallow player (default = 1)" << endl;
        cout << " depth2 - search depth of the deep player (default = 2)" << endl;
        return 0;
    }
    else 
    {
        // optional positional arguments override the defaults
        if (argc >= 3)
            n = atoi(argv[2]);
        if (argc >= 4)
            depth1 = atoi(argv[3]);
        if (argc >= 5)
            depth2 = atoi(argv[4]);
        
        if (argv[1][0] == 'b')
        {
            if (n > BATCH_SIZE)
                n = BATCH_SIZE;
            // make a backup of the current weights
            WriteWeights("weights_back");
            trainAgentBatch(depth1, depth2, n);          
            WriteWeights();
        }
        else if (argv[1][0] == 'p')
        {
            Weights current[NUM_STAGES], benchmark[NUM_STAGES];
            GetWeights(current);
            char file[] = "weights_back";
            if (ReadWeights(benchmark, file))
            {
                int total = n / 2;
                
                // play n games (half as each colour) against the previous weights
                int result = testAgent(current, benchmark, total, depth1, 8);
                cout << "wins " << result/2 + total << "/" << total*2 << " against previous weights\n";
                
                // revert to the backup unless the new weights clearly win
                if (result < 6)
                {
                    SetWeights(benchmark);
                    GetWeights(current);
                }
            }
            else
                cout << "no previous weights exist at the moment" << endl;
                  
            SetWeights(current);
            WriteWeights();
        }
        else if (argv[1][0] == 's')
        {
            playRandomGames(n);
            MPCSearch(n);
        }
        else if (argv[1][0] == 'f')
        {
            FitMPC();
        }
        else if (argv[1][0] == 't')
        {
            Weights current[NUM_STAGES], benchmark[NUM_STAGES];
            GetWeights(current);
            char file[] = "benchmark_weights";
            if (ReadWeights(benchmark, file))
            {
                int total = n / 2;
                // play n games (half as each colour) against the benchmark weights
                int result = testAgent(current, benchmark, total, depth1, 8);
        
                cout << "current player wins " << result/2 + total << "/" << total*2 << " against benchmark\n";
            }
            else
                cout << "no benchmark exist at the moment" << endl;
        }
    }
    
    gettimeofday(&tp2, NULL);
    cout << "time taken: " << tp2.tv_sec - tp.tv_sec << " seconds\n";
    return 0;
}
