#include "AI.h"

#include <sys/time.h>

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>

using namespace std;

// Gradient-descent step size for weight updates.
#define learning_rate 0.000001
// TD(lambda) decay factor used when averaging future stage values.
#define Lambda 0.4
// Gradient steps per batch in normal (non-converging) mode.
#define MAX_BATCH_STEP 10
// Gradient step cap when fully converging on a batch.
#define MAX_BATCH_CONVERGE 5000
// Extra value pushed onto non-zero terminal scores so wins/losses stand
// clear of draws; parenthesized so the macro expands safely inside any
// surrounding expression.
#define WIN_OFFSET (TERMINAL_DIFF_WEIGHT * 2)

// Maximum number of games storable in one training batch.
#define MAX_BATCH 1000

// NOTE(review): appears unused within this file — possibly referenced
// elsewhere; confirm before removing.
const float RANDOM_CHANCE = 0;

double SumOfSquaredError(Feature_Values features[], float targets[], Weights weights, int batch_count);
bool Converged(Weights oldW, Weights newW);
void learn_batch(int N, int batch_count, bool fully_converge = false);
void trainAgentBatch(int learn_depth, int learn_end_game, int max_learn, int max_batch, bool fully_converge);

gameState batch_gs[MAX_BATCH][NUM_STAGES];
BitBoard batch_blocked[MAX_BATCH];
BoardState batch_player[MAX_BATCH][NUM_STAGES];

/* Total squared prediction error over the first batch_count games.
 *
 * features    - per-game feature vectors
 * targets     - per-game training targets (parallel to features)
 * weights     - weight set handed to evaluateGameL
 * batch_count - number of games to include
 *
 * Returns the accumulated (target - evaluation)^2 as a double.
 */
double SumOfSquaredError(Feature_Values features[], float targets[], Weights weights, int batch_count)
{
    double total = 0.0;
    for (int game = 0; game < batch_count; game++)
    {
        const double predicted = evaluateGameL(features[game], weights);
        const double error = targets[game] - predicted;
        total += error * error;
    }
    return total;
}


// We declare convergence when every weight changes by less than 1% for
// 4 consecutive updates.
//
// Uses a relative-difference test |w1 - w2| <= 1% * max(|w1|, |w2|):
// the previous ratio test (w1/w2 > 1.01) gave inverted results for
// negative weights (e.g. -1 -> -2 produced a ratio of 0.5 and was
// wrongly treated as a small change) and divided by zero for zero
// weights.  Not thread-safe: uses function-local static state.
bool Converged(Weights oldW, Weights newW)
{
    // consecutive small-change updates seen so far (persists across calls)
    static int converge_count = 0;
    
    float * w1 = &(oldW.disc_feature[0]);
    float * w2 = &(newW.disc_feature[0]);
    
    for (int i = 0; i < NUM_FEATURES; i++)
    {
        float denom = std::fmax(std::fabs(w1[i]), std::fabs(w2[i]));
        // any weight that moved by more than 1% resets the streak;
        // denom == 0 means both weights are zero, i.e. no change
        if (denom > 0.0f && std::fabs(w1[i] - w2[i]) > 0.01f * denom)
        {
            converge_count = 0;
            return false;
        }
    }
    
    converge_count++;
    
    if (converge_count > 3)
    {
        converge_count = 0;
        return true;
    }
    return false;
}
    
/* Batch-train WEIGHTS on the latest recorded games via backward
 * value propagation.
 *
 * N              - number of final stages seeded with the terminal score
 *                  (presumably the end-game depth; confirm with caller)
 * batch_count    - number of games stored in the batch_* arrays
 * fully_converge - if true, iterate up to MAX_BATCH_CONVERGE steps with a
 *                  per-weight line search, stopping early on Converged()
 */
void learn_batch(int N, int batch_count, bool fully_converge)
{
    // per-game, per-stage target values; note the array is int, so
    // evaluateGameL's float result is truncated when stored at the bottom
    int values[MAX_BATCH][NUM_STAGES];
       
    // assign terminal value to each game: disc difference scaled by
    // TERMINAL_DIFF_WEIGHT, pushed away from zero by WIN_OFFSET so
    // decisive results separate cleanly from draws
    for (int game = 0; game < batch_count; game++)
    {
        int disc_diff = evaluatePieceDiff(batch_gs[game][NUM_STAGES-1]);
        int terminal_value = disc_diff * TERMINAL_DIFF_WEIGHT;
        
        if (terminal_value > 0)
            terminal_value += WIN_OFFSET;
        else if (terminal_value < 0)
            terminal_value -= WIN_OFFSET;
        
        // the last N stages all take the terminal value
        for (int stage = NUM_STAGES - 1; stage > NUM_STAGES - N - 1; stage--)
        {
            values[game][stage] = terminal_value;
        }
    }
    
    // compute each stage's weights through back propagation, walking from
    // the last non-terminal stage down to stage 0
    for (int stage = NUM_STAGES - N - 1; stage >= 0; stage--)
    {      
        Feature_Values features[MAX_BATCH];
        float targets[MAX_BATCH];
        // compute feature vector and targets for each game
        for (int game = 0; game < batch_count; game++)
        {
            initialiseBoardSpecific(batch_blocked[game]);
            features[game] = GetFeatureValue(batch_gs[game][stage], batch_player[game][stage], batch_blocked[game]);
            
            targets[game] = 0;
            float divisor = 0;
            float exp = Lambda;
            // compute target as a weighted average of the next N stages'
            // values, weighted Lambda, Lambda^2, ..., Lambda^N
            for (int future_stage = stage + 1; future_stage <= stage + N; future_stage++)
            {
                targets[game] += values[game][future_stage] * exp;
                divisor += exp;
                exp = exp * Lambda;
            }
            targets[game] = targets[game] / divisor;
        }
        
        // newWeights is updated in place through the alias w below
        Weights newWeights = WEIGHTS[stage];
        float * w = &(newWeights.disc_feature[0]);
        
        Weights oldWeights;
        
        // use different steps for full convergence to current set of games
        int max_batch_step;
        
        if (fully_converge)
            max_batch_step = MAX_BATCH_CONVERGE;
        else
            max_batch_step = MAX_BATCH_STEP;
            
        for (int i = 0; i < max_batch_step; i++)
        {
            oldWeights = newWeights;
            
            // one gradient pass: nudge every weight along its feature
            // value, scaled by the temporal-difference error
            for (int game = 0; game < batch_count; game++)
            {
                float eval = evaluateGameL(features[game], newWeights);
                
                // temporal difference * learning rate
                float diff = (targets[game] - eval) * learning_rate;                
                int * fv = &(features[game].disc_feature[0]);
                for (int k = 0; k < NUM_FEATURES; k++)
                {
                    w[k] = w[k] + fv[k] * diff;
                }
            }
            
            if (fully_converge)
            {
                Weights change = newWeights - oldWeights;
                float * delta = &(change.disc_feature[0]);
                
                // per-weight line search: keep doubling the step along this
                // iteration's delta while the batch error keeps dropping
                for (int k = 0; k < NUM_FEATURES; k++)
                {
                    int exp = 1;
                    double sumErrorNew = SumOfSquaredError(features, targets, newWeights, batch_count);
                    double sumErrorOld = sumErrorNew + 100000000;
                    while (sumErrorOld > sumErrorNew)
                    {
                        sumErrorOld = sumErrorNew;
                        exp *= 2;
                        w[k] += delta[k] * exp;
                        sumErrorNew = SumOfSquaredError(features, targets, newWeights, batch_count);
                    }
                    // NOTE(review): this backs out only the final doubling
                    // (delta[k] * exp); the earlier accepted steps (totalling
                    // delta[k] * (exp - 2)) stay applied — verify this is the
                    // intended net step rather than a rollback to the best
                    // point found
                    w[k] -= delta[k] * exp;
                }
                
                // check for convergence
                if (Converged(oldWeights, newWeights))
                {
                    cerr << "stage " << stage << " converged\n";
                    break;
                }
            }
        }
        WEIGHTS[stage] = newWeights;
        
        // save this stage's evaluations: they become the "future values"
        // for the next (earlier) stage; float result truncated to int here
        for (int game = 0; game < batch_count; game++)
        {
            values[game][stage] = evaluateGameL(features[game], WEIGHTS[stage]);
        }
    }
}

double ComputeTotalError(int N, int batch_count)
{
    double sumAllError = 0.0;
    
    int values[MAX_BATCH][NUM_STAGES];
       
    // assign terminal value to each game
    for (int game = 0; game < batch_count; game++)
    {
        int disc_diff = evaluatePieceDiff(batch_gs[game][NUM_STAGES-1]);
        int terminal_value = disc_diff * TERMINAL_DIFF_WEIGHT;
        
        if (terminal_value > 0)
            terminal_value += WIN_OFFSET;
        else if (terminal_value < 0)
            terminal_value -= WIN_OFFSET;
        
        for (int stage = NUM_STAGES - 1; stage > NUM_STAGES - N - 1; stage--)
        {
            values[game][stage] = terminal_value;
        }
    }
    
    // compute each stage's weights through back propagation
    for (int stage = NUM_STAGES - N - 1; stage >= 0; stage--)
    {      
        Feature_Values features[MAX_BATCH];
        float targets[MAX_BATCH];
        
        // compute feature vector and targets for each game
        for (int game = 0; game < batch_count; game++)
        {
            initialiseBoardSpecific(batch_blocked[game]);
            features[game] = GetFeatureValue(batch_gs[game][stage], batch_player[game][stage], batch_blocked[game]);
            
            targets[game] = 0;
            float divisor = 0;
            float exp = Lambda;
            // compute target as a weighted average of future stages' evaluation
            for (int future_stage = stage + 1; future_stage <= stage + N; future_stage++)
            {
                targets[game] += values[game][future_stage] * exp;
                divisor += exp;
                exp = exp * Lambda;
            }
            targets[game] = targets[game] / divisor;
        }
        
        sumAllError += SumOfSquaredError(features, targets, WEIGHTS[stage], batch_count);
        
        for (int game = 0; game < batch_count; game++)
        {
            values[game][stage] = evaluateGameL(features[game], WEIGHTS[stage]);
        }
    }
    
    return sumAllError;
}
    
// Push the current WEIGHTS into a rolling history (most recent first,
// up to MAX_HISTORY snapshots) and report which single weight varied the
// most, by absolute range, across the stored snapshots.  Uses static
// state, so not thread-safe.
void updateWeights()
{
    const int MAX_HISTORY = 20;
    static Weights w_history[MAX_HISTORY][NUM_STAGES];
    static int count = 0;

    // Age the existing snapshots by one slot (newest lives at index 0).
    for (int slot = count - 1; slot > 0; slot--)
        for (int stage = 0; stage < NUM_STAGES; stage++)
            w_history[slot][stage] = w_history[slot - 1][stage];

    // Store the freshest weight table at the front.
    for (int stage = 0; stage < NUM_STAGES; stage++)
        w_history[0][stage] = WEIGHTS[stage];

    if (count < MAX_HISTORY)
        count++;

    // Track per-weight extremes over the history, ignoring zero entries
    // (zeros presumably mean "never set"; confirm against weight init).
    float minimum[NUM_STAGES][NUM_FEATURES];
    float maximum[NUM_STAGES][NUM_FEATURES];
    for (int stage = 0; stage < NUM_STAGES; stage++)
    {
        for (int feature = 0; feature < NUM_FEATURES; feature++)
        {
            minimum[stage][feature] = INFINITY;
            maximum[stage][feature] = -INFINITY;
        }
    }

    for (int slot = 0; slot < count; slot++)
    {
        for (int stage = 0; stage < NUM_STAGES; stage++)
        {
            float * w = &(w_history[slot][stage].disc_feature[0]);
            for (int feature = 0; feature < NUM_FEATURES; feature++)
            {
                if (w[feature] != 0 && w[feature] < minimum[stage][feature])
                    minimum[stage][feature] = w[feature];
                if (w[feature] != 0 && w[feature] > maximum[stage][feature])
                    maximum[stage][feature] = w[feature];
            }
        }
    }

    // Find the weight with the widest absolute range.
    float maxChange = 0.0;
    int rStage = 0;
    int rFeature = 0;
    for (int stage = 0; stage < NUM_STAGES; stage++)
    {
        for (int feature = 0; feature < NUM_FEATURES; feature++)
        {
            float range = (maximum[stage][feature] - minimum[stage][feature]);// / minimum[stage][feature];
            if (range > maxChange)
            {
                maxChange = range;
                rStage = stage;
                rFeature = feature;
            }
        }
    }

    const char feature_names[][20] = {"corner", "r1c2", "r1c3", "r1c4", "r1c5", "r2c2",
                                      "r2c3", "r2c4", "r2c5", "r3c3", "r3c4", "r3c5",
                                      "r4c4", "r4c5", "r5c5", "mobility", "frontier",
                                      "disc count", "stability", "player" };
    cout << "Largest variation in weights: stage " << rStage << " " << feature_names[rFeature];
    printf(" from %.3f to %.3f\n", minimum[rStage][rFeature], maximum[rStage][rFeature]);
}

/* Self-play training loop.
 *
 * Plays games with chooseMove at the given search depths, records the
 * position after every move, and batch-trains the weights each time
 * max_batch fully terminated games have been collected.
 *
 * learn_depth    - mid-game search depth used during self-play
 * learn_end_game - end-game search depth, also passed to learn_batch as
 *                  the number of terminal stages
 * max_learn      - total number of games to train on
 * max_batch      - games per training batch (caller bounds by MAX_BATCH)
 * fully_converge - if true, train each batch to convergence
 */
void trainAgentBatch(int learn_depth, int learn_end_game, int max_learn, int max_batch, bool fully_converge)
{
    int game_count = 0;
    
    while (game_count < max_learn)
    {
        int batch_count = 0;
        while (batch_count < max_batch)
        {
            Board board;
            
            bool _gameEnd = false;
            int _moves[100];
            BitBoard _effect[100];
     
            int stage = 0;
            BoardState player = WHITE;
            
            while (!_gameEnd)
            {
                gameState gs = parseBoard(board, BLOCKED_MAP);
                
                int mCount = getMoves(gs, BLOCKED_MAP, player, _moves, _effect);
                
                if (mCount != 0)
                {
                    int _m = chooseMove(board, player, learn_depth, learn_end_game);
                    
                    // apply move; _m encodes the square as y*10 + x
                    int y = _m / 10;
                    int x = _m % 10;
                    if (!board.makeMove(x, y, player))
                    {
                        cout << "move " << x << ", " << y << " illegal\n";
                        return;
                    }
                    
                    // record the post-move position; the bounds guard keeps
                    // games longer than NUM_STAGES from overrunning the
                    // batch arrays (previously an out-of-bounds write)
                    if (stage < NUM_STAGES)
                    {
                        batch_gs[batch_count][stage] = parseBoard(board, BLOCKED_MAP);
                        batch_player[batch_count][stage] = player;
                    }
                    
                    stage++;
                    
                    player = player == BLACK ? WHITE : BLACK;    
                }
                else
                {
                    // current player must pass; the game ends only when the
                    // opponent has no moves either
                    player = player == BLACK ? WHITE : BLACK;
                    mCount = evaluateMobility(gs, player, BLOCKED_MAP);
                    if (mCount == 0)
                        break;       
                }
            }
            // only record fully terminated games; shorter ones are replayed
            if (stage >= NUM_STAGES)
            {
                batch_blocked[batch_count] = BLOCKED_MAP;
                batch_count++;
                game_count++;
            }
        }
        cout << game_count << " games finished" << endl;
        
        // batch-train the weights on the latest games, timing the pass
        timeval start, end;
        gettimeofday(&start, NULL);
        learn_batch(learn_end_game, max_batch, fully_converge);
        
        // put the new weights in history and print the most-changed
        // weight's variation over recent batches
        updateWeights();
         
        gettimeofday(&end, NULL);
        
        if (fully_converge)
        {
            cout << "time taken for convergence: " << end.tv_sec - start.tv_sec << " seconds" << endl;
        }
        
        // persist the updated weights
        writeWeights();
    }
}

int main(int argc, char * argv[])
{       
    initialiseAI();
    
    timeval tp;
    gettimeofday(&tp, NULL);
    srand48(tp.tv_sec ^ tp.tv_usec);
    
    char buffer[256];
    int n = 1;
    int bn = 1;
    int depth1 = 1;
    int depth2 = 6;
    bool full_converge = false;
    
    if (argc == 1)
    {
        cout << "Usage: learner n [bn] [depth1] [depth2] [f]" << endl;
        cout << "      n: total number of games" << endl;
        cout << "     bn: number of games in batch (default = 1, max = 1000)" << endl;
        cout << "depth 1: search depth (default = 1)" << endl;
        cout << "depth 2: end game search depth (default = 6)" << endl;
        cout << "      f: flag for full convergence to lastest batch set" << endl;
    }
    else 
    {
        if (argc >= 2)
        {
            n = atoi(argv[1]);
        }
        if (argc >= 3)
            bn = atoi(argv[2]);
        if (argc >= 4)
            depth1 = atoi(argv[3]);
        if (argc >= 5)
            depth2 = atoi(argv[4]);
        if (argc >= 6 && argv[5][0] == 'f')
            full_converge = true;
               
        if (bn > 1000)
            bn = 1000;
                
        trainAgentBatch(depth1, depth2, n, bn, full_converge);
    } 
    return 0;
}
