#include <iostream>
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <ctime>
#include <sys/time.h>
#include "AI.h"
#include "MPC.h"

using namespace std;

#define MAX_POPULATION 100
#define MAX_BATCH 10000
#define N 6

// Absolute value of a double (local helper; avoids <cmath> for one call).
double absolute(double a)
{
    return a < 0 ? -a : a;
}

const float epsilon = 0.1;
const float alpha = 0.1;

gameState boards[MAX_BATCH][NUM_STAGES];
BitBoard blocked[MAX_BATCH];
BoardState turn[MAX_BATCH][NUM_STAGES];

// Build a pseudo-inverse of the feature vector such that the dot product
// feature . inv == 1, with the unit mass split evenly across the n
// non-zero features.  Verifies the invariant and aborts on failure.
//
// f:   feature vector (passed by value; reinterpreted as NUM_FEATURES ints)
// inv: output array of NUM_FEATURES floats
//
// NOTE(review): relies on Feature_Values being a plain struct of
// NUM_FEATURES contiguous ints with no padding -- confirm against AI.h.
void inverse(Feature_Values f, float inv[])
{
    int * x = (int *) &f;
    int n = 0;
    
    // count the non-zero features
    for (int i = 0; i < NUM_FEATURES; i++)
        n += x[i] == 0 ? 0 : 1;
    
    // each non-zero entry contributes exactly 1/n to the dot product
    for (int i = 0; i < NUM_FEATURES; i++)
        inv[i] = x[i] == 0 ? 0 : 1.0f/(x[i] * n);
        
    // verify that x * x^(-1) == 1 (skipped when all features are zero)
    float sum = 0.0f;
    for (int i = 0; i < NUM_FEATURES; i++)
        sum += x[i] * inv[i];
    
    if (n != 0 && absolute(sum - 1) > 0.00001)
    {
        cerr << "x * x^(-1) = " << sum << ", expected 1" << endl;
        for (int i = 0; i < NUM_FEATURES; i++)
            cerr << "x[" << i << "] = " << x[i]
                 << "  inv[" << i << "] = " << inv[i] << endl;
        // was exit(0): an invariant violation must not report success
        exit(EXIT_FAILURE);
    }   
}

// Batch TD(0) update over one finished self-play game recorded in
// boards[0] / turn[0].
//
// max_stages: number of positions recorded for this game.
// win_offset: bonus magnitude added to the terminal value for a win
//             (subtracted for a loss) to sharpen decisive outcomes.
//
// Walks the game backwards: at each stage, nudges that stage's weight
// vector so its evaluation moves a step of size alpha towards the
// already-updated evaluation of the following stage.
//
// NOTE(review): the terminal value is computed from the last recorded
// board but seeded at index max_stages - N, so the final plies are never
// updated -- presumably they are handled by exact end-game search rather
// than the learned evaluation; confirm against the AI module.
void learn(int max_stages, int win_offset)
{
    float value[NUM_STAGES];
    
    // value of the last position determined by disc difference
    int disc_diff = evaluatePieceDiff(boards[0][max_stages - 1]);
    int terminal_value = disc_diff * TERMINAL_DIFF_WEIGHT;
    
    // reward winning (penalise losing) beyond the raw disc count
    if (terminal_value > 0)
        terminal_value += win_offset;
    else if (terminal_value < 0)
        terminal_value -= win_offset;
    
    value[max_stages - N] = terminal_value;

    // value[stage+1] is always defined here: it is either the seeded
    // terminal value (first iteration) or the result of the previous pass
    for (int stage = max_stages - N - 1; stage >= 0; stage--)
    {
        initialiseBoardSpecific(blocked[0]);
        Feature_Values feature = GetFeatureValue(boards[0][stage], turn[0][stage], blocked[0]);
        
        // temporal-difference error for this stage
        float TD = value[stage+1] - evaluateGameL(feature, WEIGHTS[stage]);
        
        // calculate the inverse vector of features
        float inv[NUM_FEATURES];
        inverse(feature, inv);
        
        // update each weight so that
        // eval(stage) = eval(stage) + alpha * (eval(stage+1) - eval(stage))
        // NOTE(review): treats Weights as a flat float[NUM_FEATURES] --
        // assumes no padding; confirm against the Weights definition
        float * w = (float*) &WEIGHTS[stage];
        for (int i = 0; i < NUM_FEATURES; i++)
            w[i] = w[i] + alpha * TD * inv[i];
            
        // re-evaluate with the updated weights for the next (earlier) stage
        value[stage] = evaluateGameL(feature, WEIGHTS[stage]);    
    }   
}

void trainAgentTD(int max_learn, int win_offset)
{
    int game_count = 0;
    
    while (game_count < max_learn)
    {
        Board board;
        
        bool _gameEnd = false;
        int _moves[100], stage;
        BitBoard _effect[100];
 
        stage = 0;
        BoardState player = WHITE;
        
        while (!_gameEnd)
        {
            gameState gs = parseBoard(board, blocked[0]);
            
            int mCount = getMoves(gs, blocked[0], player, _moves, _effect);
            
            
            if (mCount != 0)
            {
                int _m; 
                
                // use epsilon-greedy policy to choose the next move
                if ((rand() % 100000) / 100000.0f < epsilon)
                    _m = _moves[rand() % mCount];
                else
                    _m = chooseMove(board, player, 1, 0);
                    
                // apply move and record consequent bit board
                int y = _m / 10;
                int x = _m % 10;
                if (!board.makeMove(x, y, player))
                {
                    cout << "move " << x << ", " << y << " illegal\n";
                    return;
                }
                
                boards[0][stage] = parseBoard(board, blocked[0]);
                turn[0][stage] = player;
                stage++;
                
                player = player == BLACK ? WHITE : BLACK;    
            }
            else
            {
                player = player == BLACK ? WHITE : BLACK;
                mCount = evaluateMobility(gs, player, blocked[0]);
                if (mCount == 0)
                    break;       
            }
        }
        
        // learn from the history
        learn(stage, win_offset);
        game_count++;
        //cerr << game_count << " games finished" << endl;
    }
}


/* Let two agents using different weight sets play n games as each colour
   against each other; returns the number of games agent1 wins minus the
   number it loses (draws count for neither side). */
int testAgent(Weights agent1[], Weights agent2[], int n, int depth)
{
    int score = 0;
    
    for (int game = 0; game < 2*n; game++)
    {
        Board board;
        BoardState side = WHITE;
        // alternate agent1's colour every game for fairness
        BoardState agent1color = (game % 2 == 0) ? WHITE : BLACK;
        
        // play one game to completion
        for (;;)
        {
            BitBoard blocks;
            gameState gs = parseBoard(board, blocks);
            
            if (evaluateMobility(gs, side, blocks) == 0)
            {
                // current side passes; game over if the opponent is
                // also unable to move
                side = (side == BLACK) ? WHITE : BLACK;
                if (evaluateMobility(gs, side, blocks) == 0)
                    break;
                continue;
            }
            
            // install the weights of whichever agent owns this colour
            SetWeights(side == agent1color ? agent1 : agent2);
            
            // small random depth jitter keeps repeated games varied
            int move = chooseMove(board, side, depth + rand()%2, 6);
            
            // decode, apply and switch sides
            int row = move / 10;
            int col = move % 10;
            if (!board.makeMove(col, row, side))
            {
                cerr << "move " << col << ", " << row << " illegal\n";
                return 0;
            }
            side = (side == BLACK) ? WHITE : BLACK;
        }
        
        // decide the winner from the final disc difference
        int pieceDiff = board.cellCount(WHITE) - board.cellCount(BLACK);
        BoardState winner = EMPTY;
        if (pieceDiff > 0)
            winner = WHITE;
        else if (pieceDiff < 0)
            winner = BLACK;
        
        if (winner == agent1color)
            score++;
        else if (winner != EMPTY)
            score--;
    }
    
    return score;
} 

void trainAgentCoevolution(int population)
{
    const int TD_GAMES = 100;
    const int games_per_color = 10;
    const float beta = 0.2;

    Weights oldGeneration[NUM_STAGES];
    Weights newGeneration[MAX_POPULATION][NUM_STAGES];
    int fitness[MAX_POPULATION];
    
    for (int i = 0; i < NUM_STAGES; i++)
    {
        // set parent generation to current weights
        oldGeneration[i] = WEIGHTS[i];
        
        // the 'cloned' child
        newGeneration[0][i] = WEIGHTS[i];
    }
        
    fitness[0] = 0;
    // generate new generation using TD by self-playing
    for (int i = 1; i < population; i++)
    {
        SetWeights(oldGeneration);
        
        trainAgentTD(TD_GAMES, 60 * TERMINAL_DIFF_WEIGHT);
        
        GetWeights(newGeneration[i]);
        
        //cerr << "child " << i << " generated" << endl;
       
        fitness[i] = 0;
    }
    
    // play a round-robin tournament
    for (int i = 0; i < population; i++)
    {
        for (int j = i+1; j < population; j++)
        {
            int result = testAgent(newGeneration[i], newGeneration[j], games_per_color, 1);
            fitness[i] += result;
            fitness[j] -= result;
        }
    }
    
    // select the best of new generation
    int fittest = -10000;
    int best = -1;
    for (int i = 0; i < population; i++)
    {
        if (fitness[i] > fittest)
        {
            fittest = fitness[i];
            best = i;
        }
    }
    
    int total = games_per_color * 2 * (population - 1);
    cout << "best of the new generation: " << best << endl;
    cout << "wins " << (fitness[best]+total) / 2 << " out of " << total << " games played\n";
    
    for (int stage = 0; stage < NUM_STAGES; stage++)
        oldGeneration[stage] = oldGeneration[stage] + beta * (newGeneration[best][stage] - oldGeneration[stage]);
        
    Weights benchmark[NUM_STAGES];
    
    
    char file[] = "benchmark_weights";
    if (ReadWeights(benchmark, file))
    {
        // play 20 games against the benchmark weights
        int result = testAgent(oldGeneration, benchmark, 10, 1);
        
        cout << "wins " << (result+20)/2 << " out of 20 games against benchmark player\n";
    }
    else
        cout << "no benchmark exist at the moment" << endl;
        
    SetWeights(oldGeneration);
}



// Entry point: seeds the RNGs, parses the command line and dispatches to
// batch TD learning ('t') or co-evolution learning ('c'), then persists
// the resulting weights.  With no arguments, prints usage and exits.
int main(int argc, char * argv[])
{       
    initialiseAI();
    
    // seed both rand() and the drand48() family from the wall clock
    timeval tp;
    gettimeofday(&tp, NULL);
    srand48(tp.tv_sec ^ tp.tv_usec);
    srand(tp.tv_sec ^ tp.tv_usec);
    
    int n = 1;
    int depth1 = 1;
    int depth2 = 6;
    
    if (argc == 1)
    {
        cout << "Usage: learner t|c [n] [depth1] [depth2]" << endl;
        cout << "    t|c: batch TD learning or co-evolution learning" << endl;
        cout << "      n: total number of games (TD) / size of new population (CEL)" << endl;
        cout << "depth 1: search depth (default = 1)" << endl;
        cout << "depth 2: end game search depth (default = 6)" << endl;
    }
    else 
    {
        if (argc >= 3)
            n = atoi(argv[2]);
        if (argc >= 4)
            depth1 = atoi(argv[3]);
        if (argc >= 5)
            depth2 = atoi(argv[4]);
        
        // NOTE(review): depth1/depth2 are advertised in the usage text but
        // never forwarded to the training routines -- TODO wire them into
        // the calls below or drop them from the usage message.
        (void)depth1;
        (void)depth2;
        
        if (argv[1][0] == 't')
        {
            // clamp to the size of the recorded-game buffers
            if (n > MAX_BATCH)
                n = MAX_BATCH;
            trainAgentTD(n, 100);
        }
        else if (argv[1][0] == 'c')
        {
            if (n > MAX_POPULATION)
                n = MAX_POPULATION;
            trainAgentCoevolution(n);
        }
        
        // note: weights are written even for an unrecognised mode letter
        writeWeights();
    } 
    return 0;
}
