#include "network.h"
#include "utils.h"
#include "parser.h"
#include "option_list.h"
#include "blas.h"
#include "gomoku.h"
#include "image.h"

/*
 * Pick a uniformly random legal move.
 * Counts the empty cells, draws k uniform in [0, count-1], and returns the
 * board index of the k-th empty cell.
 * NOTE(review): assumes at least one empty cell exists — if the board is
 * full, the scan below runs past the end of board_c (same as before).
 */
int player_random(Game *game){
    // How many empty cells are there?
    int num_empty = 0;
    for (Pos p = 0; p < game->wh; p++){
        if (!game->board_c[p]) num_empty++;
    }

    // Draw which empty cell to play (0-based rank among empty cells).
    int k = rand_int(0, num_empty - 1);

    // Advance to the first empty cell, then skip forward k more empties.
    Pos pos = 0;
    while (game->board_c[pos]) pos++;
    while (k-- > 0){
        pos++;
        while (game->board_c[pos]) pos++;
    }
    return pos;
}

/*
 * Greedy network player: run the net on the current board and play the
 * legal cell with the highest predicted probability.
 * The net output holds two wh-sized planes (one per player); the plane for
 * the player to move is selected via !game->cur_node->player.
 * Falls back to a uniformly random legal move if no empty cell was found.
 */
int player_net_hard(network *net, Game *game){
    float* prob = network_predict(*net, game->board);
    float* prob_cur = prob + game->wh * (!game->cur_node->player);
    print_grid(prob_cur, net->w, net->h);
    // cuda_pull_array(net->layers[net->n-4].output_gpu, net->layers[net->n-4].output, net->layers[net->n-4].outputs);
    // activate_array(net->layers[net->n-4].output, net->layers[net->n-4].outputs, LOGISTIC);
    // float* prob_w_cur = net->layers[net->n-4].output + game->wh * (!game->cur_node->player);
    // print_grid(prob_w_cur, net->w, net->h);

    float max_prob = -1;
    Pos max_pos = -1;
    for (Pos p=0; p < game->wh; p++){
        if(game->board_c[p]) continue;   // skip occupied cells
        // Renamed from `prob`: the original shadowed the outer float* prob.
        float cell_prob = prob_cur[p];
        if(cell_prob > max_prob){
            max_pos = p;
            max_prob = cell_prob;
        }
    }
    if(max_pos==-1) return player_random(game);  // board full / no legal cell
    return max_pos;
}

/*
 * Stochastic network player: run the net on the current board and sample a
 * legal move proportionally to the predicted probabilities.
 * Occupied cells are zeroed in prob_cur so they carry no sampling mass.
 * Falls back to a random legal move if sampling lands on an occupied cell
 * (possible when all legal mass is ~0).
 */
int player_net(network *net, Game *game){
    float* prob = network_predict(*net, game->board);
    float* prob_cur = prob + game->wh * (!game->cur_node->player);
    print_grid(prob_cur, net->w, net->h);

    float legal_prob = 0;
    for (Pos p=0; p < game->wh; p++){
        if(!game->board_c[p]){
            legal_prob += prob_cur[p];
        } else {
            prob_cur[p] = 0;   // occupied: remove from the distribution
        }
    }
    // Inverse-CDF sampling: subtract cell masses until the threshold drops
    // below zero.
    Pos rand_pos=0;
    float p=random_float()*legal_prob-prob_cur[rand_pos];
    // BUGFIX: bound was `rand_pos < game->wh`, which let rand_pos reach wh
    // and read prob_cur[wh] / board_c[wh] out of bounds when float rounding
    // left p > 0 after the last cell. Stop at the last valid index instead.
    while (p>0 && rand_pos < game->wh - 1){
        rand_pos++;
        p-=prob_cur[rand_pos];
    }
    if(game->board_c[rand_pos]) return player_random(game);
    return rand_pos;
}

/*
 * Sample a move with the final layer's softmax temperature forced to 1,
 * restoring the configured temperature before returning.
 */
int player_net_cold(network *net, Game *game){
    layer *last = &net->layers[net->n - 1];
    float saved_temp = last->temperature;
    last->temperature = 1;
    int pos = player_net(net, game);
    last->temperature = saved_temp;
    return pos;
}

/*
 * Sample a legal move from a precomputed probability buffer (two wh-sized
 * planes, one per player) instead of running the network here.
 * Mutates prob: occupied cells in the current player's plane are zeroed.
 * Falls back to a random legal move if sampling lands on an occupied cell.
 */
int player_net_with_prob(float* prob, Game *game){
    float* prob_cur = prob + game->wh * (!game->cur_node->player);
    // print_grid(prob_cur, game->w, game->h);
    float legal_prob = 0;
    for (Pos p=0; p < game->wh; p++){
        if(!game->board_c[p]){
            legal_prob += prob_cur[p];
        } else {
            prob_cur[p] = 0;   // occupied: remove from the distribution
        }
    }
    // Inverse-CDF sampling over the legal cells.
    Pos rand_pos=0;
    float p=random_float()*legal_prob-prob_cur[rand_pos];
    // BUGFIX: bound was `rand_pos < game->wh`, letting rand_pos reach wh and
    // index prob_cur/board_c one past the end when rounding left p > 0.
    while (p>0 && rand_pos < game->wh - 1){
        rand_pos++;
        p-=prob_cur[rand_pos];
    }
    if(game->board_c[rand_pos]) return player_random(game);
    return rand_pos;
}

/*
 * One self-play + training iteration.
 * 1. Self-play: net->round games are played in parallel by net_inf (the
 *    inference/teacher copy), recording per-move policy and prob targets.
 * 2. Finished games are unwound into game's replay memory, with a random
 *    flip/rotation applied to each position (and the move coordinate
 *    transformed to match).
 * 3. The memory is replayed in chunks of net->max_play to compute losses
 *    and gradients on net, then the weights are updated and net_inf is
 *    synced as a mean teacher.
 * Returns the average per-position loss.
 * NOTE(review): static buffers make this non-reentrant and sized for the
 * first net/game seen — call with consistent arguments.
 */
float train_single_iter(network *net, network *net_inf, Game *game){
    float loss=0;

    // allocate memory (once; reused across iterations)
    static Game* game_tmp = NULL;
    static float* board_tmp = NULL;
    static float* floating_prior_gpu = NULL;
    if(game_tmp == NULL){
        game_tmp = (Game*)xcalloc(net->round, sizeof(Game));
        board_tmp = (float*)xcalloc(3 * game->wh * net->round, sizeof(float));
        // Prior favoring central cells: weight = distance-to-edge sum + 1,
        // normalized to sum to 1.
        float* floating_prior = (float*)xcalloc(game->wh * net->round, sizeof(float));
        float sum=0;
        for (int i=0; i < game->h; i++){
            for (int j=0; j < game->w; j++){
                int p = i * game->w + j;
                floating_prior[p] = fmin(j, game->w - j - 1) + fmin(i,  game->h - i - 1) + 1;
                sum += floating_prior[p];
            }
        }
        scal_cpu(game->wh, 1. / sum, floating_prior, 1);
        floating_prior_gpu = cuda_make_array(floating_prior, game->wh);
        free(floating_prior);
        for (int r=0; r < net->round; r++) game_tmp_init(game_tmp+r, net);
    }

    // Self-play phase: batch all still-running games (winner == 3) through
    // net_inf each hand until every game has finished.
    game->memory_pos = 0;
    int sbatch_map[net->round];
    for (int hand=0; 1; hand++){
        int sbatch_size=0;
        int net_play = rand_int(0, 1);  // unused; kept so the rand() stream is unchanged
        for (int r=0; r < net->round; r++){
            if (game_tmp[r].winner == 3){
                memcpy(board_tmp + sbatch_size * 3 * game->wh, game_tmp[r].board, 3 * game->wh * sizeof(float));
                sbatch_map[sbatch_size++] = r;
            }
        }
        // printf("s: %d\n", sbatch_size);
        if (!sbatch_size) break;
        // if(*net->cur_iteration < 50){
        //     for (int s=0; s < sbatch_size; s++){
        //         int r = sbatch_map[s];
        //         game_place(game_tmp + r, player_random(game_tmp + r));
        //         game_tmp[r].winner = game_end(game_tmp + r);
        //     }
        //     continue;
        // }
        set_batch_network(net_inf, sbatch_size);
        float* policy = network_predict(*net_inf, board_tmp);
        // Layer n-4 holds the prob head; apply LOGISTIC and pull it to host.
        layer *p_layer = &net_inf->layers[net_inf->n - 4];
        activate_array_ongpu(p_layer->output_gpu, p_layer->outputs * sbatch_size, LOGISTIC);
        cuda_pull_array(p_layer->output_gpu, p_layer->output, p_layer->outputs * sbatch_size);
        for (int s=0; s < sbatch_size; s++){
            int r = sbatch_map[s];
            memcpy(game_tmp[r].cur_node->policy, policy + s * 2 * game->wh, 2 * game->wh * sizeof(float));
            memcpy(game_tmp[r].cur_node->prob,  p_layer->output + s * 2 * game->wh, 2 * game->wh * sizeof(float));
            game_place(game_tmp + r, player_net_with_prob(policy + s * 2 * game->wh, game_tmp + r));
            game_tmp[r].winner = game_end(game_tmp + r);
        }
    }
    // Unwind finished games (winner < 2, i.e. a decisive result) into the
    // replay memory, applying a random flip/rotation to each position.
    for (int r=0; r < net->round; r++){
        Game *this_game = game_tmp + r;
        if(this_game->winner < 2){
            for (int rev_i=0; this_game->cur_node != this_game->root; rev_i++){
                game->player_memory[game->memory_pos] = this_game->cur_node->player;
                game->winner_memory[game->memory_pos] = this_game->winner;

                Pos pos = game_unplace(this_game);

                float *board_m = game->board_memory + game->wh * 3 * game->memory_pos;
                float *policy_m = game->mean_policy_memory + game->wh * 2 * game->memory_pos;
                float *prob_m = game->mean_prob_memory + game->wh * 2 * game->memory_pos;

                memcpy(board_m,  this_game->board,             3 * game->wh * sizeof(float));
                memcpy(policy_m, game_tmp[r].cur_node->policy, 2 * game->wh * sizeof(float));
                memcpy(prob_m,   game_tmp[r].cur_node->prob,   2 * game->wh * sizeof(float));

                // Data augmentation: random horizontal flip + 0-3 clockwise
                // rotations, applied to the images and the move coordinate.
                int x = pos % game->w, y = pos / game->w;
                int flip = rand()%2;
                int rotate = rand()%4;
                image board_im  = float_to_image(game->w, game->h, 3, board_m);
                image policy_im = float_to_image(game->w, game->h, 2, policy_m);
                image prob_im   = float_to_image(game->w, game->h, 2, prob_m);
                if(flip){
                    flip_image(board_im);
                    flip_image(policy_im);
                    flip_image(prob_im);
                    x = game->w - 1 - x;
                }
                rotate_image_cw(board_im, rotate);
                rotate_image_cw(policy_im, rotate);
                rotate_image_cw(prob_im, rotate);
                for (; rotate; rotate--) {
                    int swap = x;
                    x = y;
                    y = game->w - 1 - swap;
                }
                pos = x + y * game->w;

                game->pos_memory[game->memory_pos] = pos;
                game->iter_memory[game->memory_pos] = game->n_playout;
                game->memory_pos++;
                // if(rev_i && *net->cur_iteration < 10000 * rev_i + rand_int(0, 10000)) break;
            }
            game->n_playout++;
        }
    }
    for (int r=0; r < net->round; r++) game_reset(game_tmp+r);

    // Training phase: replay the memory in chunks of net->max_play.
    // p_target alternates 1/0 as we walk back from the winning move.
    float p_target = 1;
    int cur_iter = -1;
    layer *q_layer = &net->layers[net->n-1];   // policy (q) head
    layer *p_layer = &net->layers[net->n-4];   // prob (p) head
    network_state state = {0};
    state.net = *net;
    state.train = 1;

    for (int s=0; s < game->memory_pos; s += net->max_play){
        int batch_size = game->memory_pos - s;
        if (batch_size > net->max_play) batch_size = net->max_play;
        set_batch_network(net, batch_size);

        state.input = cuda_make_array(
            game->board_memory + s * 3 * game->wh,
            batch_size * 3 * game->wh
        );
        float *mean_policy = cuda_make_array(
            game->mean_policy_memory + s * 2 * game->wh,
            batch_size * 2 * game->wh
        );
        float *mean_prob = cuda_make_array(
            game->mean_prob_memory + s * 2 * game->wh,
            batch_size * 2 * game->wh
        );

        forward_network_gpu(*net, state);
        float *prob_pred = cuda_make_array(p_layer->output_gpu, p_layer->outputs * net->batch);
        activate_array_ongpu(prob_pred, p_layer->outputs * net->batch, LOGISTIC);

        float *prob_mat_gpu = cuda_make_array(NULL, p_layer->outputs * net->batch);
        float prob_mat[game->wh];
        activate_and_mult(prob_pred, q_layer->output_gpu, p_layer->outputs * net->batch, LINEAR, prob_mat_gpu);

        for (int i=0; i < batch_size; i++){
            uint8_t winner = game->winner_memory[i + s];
            uint8_t player = game->player_memory[i + s];
            const Pos pos = game->pos_memory[i + s];
            // A new playout always starts at the winner's final move.
            if(game->iter_memory[i + s] != cur_iter){
                p_target = 1;
                assert(player == winner);
                cur_iter = game->iter_memory[i + s];
            }

            float p_prob, p_delta=0;

            // Binary cross-entropy on the played cell, with clamping to
            // avoid log(0).
            cuda_pull_array(prob_pred + (player + 2 * i) * game->wh + pos, &p_prob, 1);
            if(p_prob < 1e-6) p_prob = 1e-6;
            if(p_prob > 1-1e-6) p_prob = 1-1e-6;
            loss += -log(p_prob) * p_target - log(1-p_prob) * (1 - p_target);
            p_delta = p_target - p_prob;
            cuda_push_array(p_layer->delta_gpu  + (player + 2 * i) * game->wh + pos, &p_delta, 1);

            cuda_pull_array(prob_mat_gpu + (player + 2 * i) * game->wh, prob_mat, game->wh);
            p_prob = sum_array(prob_mat, game->wh);
            // p_target = 0.1 * (1 - p_prob) + 0.9 * (1 - p_target);
            p_target = 1 - p_target;   // alternate target walking back through the game
            softmax_vi_gpu(
                game->wh,
                q_layer->output_gpu + (player + 2 * i) * game->wh,
                prob_pred + (player + 2 * i) * game->wh,
                1. / q_layer->temperature,
                floating_prior_gpu,
                q_layer->delta_gpu + (player + 2 * i) * game->wh
            );
            // Exponential moving average of the prior toward the mean policy.
            scal_ongpu(game->wh, 0.999, floating_prior_gpu, 1);
            axpy_ongpu(game->wh, 0.001, mean_policy + (player + 2 * i) * game->wh, 1, floating_prior_gpu, 1);
        }
        sigmoid_kl_gpu(game->wh, net->batch, p_layer->output_gpu, state.input,  q_layer->output_gpu, p_layer->delta_gpu);

        // BUGFIX: `1 / outputs` was integer division — 0 for any outputs > 1,
        // silently disabling the L2 regularization term. Use float division.
        l2_reg_gpu(p_layer->outputs * net->batch, 1.0f / p_layer->outputs, prob_pred, mean_prob, p_layer->delta_gpu);
        l2_reg_gpu(q_layer->outputs * net->batch, 1.0f / q_layer->outputs, q_layer->output_gpu, mean_policy, q_layer->delta_gpu);
        backward_network_gpu(*net, state);

        cuda_free(prob_pred);
        cuda_free(state.input);
        cuda_free(mean_policy);
        cuda_free(mean_prob);
        cuda_free(prob_mat_gpu);
    }
    (*net->seen) += game->memory_pos;

    // Apply the accumulated gradients with batch = total positions, then
    // restore the configured batch and sync the mean teacher.
    int batch_latent = net->batch;
    net->batch = game->memory_pos;
    update_network_gpu(*net);
    sync_mean_teacher(net, net_inf, 1e-2);
    net->batch = batch_latent;
    return loss / game->memory_pos;
}

/*
 * Evaluation loop: play 16 rounds of net->round parallel games, alternating
 * which side net plays first (net's opponent here is the random player —
 * the net_ref branch at L325's equivalent picks random moves).
 * Returns net's win percentage over decided games; also prints the tally.
 */
float self_gomoku_loop(network *net, network *net_ref, Game *game){
    int num_win = 0;
    int num_lose = 0;

    // allocate memory (once; static buffers reused across calls)
    static Game* game_tmp = NULL;
    static float* board_tmp = NULL;
    if(game_tmp == NULL){
        game_tmp = (Game*)xcalloc(net->round, sizeof(Game));
        board_tmp = (float*)xcalloc(3 * game->wh * net->round, sizeof(float));
        for (int r=0; r < net->round; r++) game_tmp_init(game_tmp+r, net);
    }

    // prepare batch
    for (int iter=0; iter < 16; iter++){
        int sbatch_map[net->round];
        int net_play = iter%2;   // which player index net takes this round
        for (int hand=0; 1; hand++){
            int sbatch_size=0;
            for (int r=0; r < net->round; r++){
                if (game_tmp[r].winner == 3){
                    // BUGFIX: board_tmp was never filled, so network_predict
                    // always saw an all-zero (empty) board. Copy each live
                    // game's board into the batch, as the training loop does.
                    memcpy(board_tmp + sbatch_size * 3 * game->wh, game_tmp[r].board, 3 * game->wh * sizeof(float));
                    sbatch_map[sbatch_size++] = r;
                }
            }
            if (!sbatch_size) break;
            network *net_this = (net_play ^ (hand % 2)) ? net_ref : net;
            int pos;
            if (net_this == net){
                set_batch_network(net_this, sbatch_size);
                float* policy = network_predict(*net_this, board_tmp);
                for (int s=0; s < sbatch_size; s++){
                    int r = sbatch_map[s];
                    game_place(game_tmp + r, player_net_with_prob(policy + s * 2 * game->wh, game_tmp + r));
                    game_tmp[r].winner = game_end(game_tmp + r);
                }
            } else {
                // Reference side plays uniformly random legal moves.
                for (int s=0; s < sbatch_size; s++){
                    int r = sbatch_map[s];
                    game_place(game_tmp + r, player_random(game_tmp + r));
                    game_tmp[r].winner = game_end(game_tmp + r);
                }
            }
        }
        for (int r=0; r < net->round; r++) num_win += game_tmp[r].winner==net_play;
        for (int r=0; r < net->round; r++) num_lose += game_tmp[r].winner==!net_play;
        for (int r=0; r < net->round; r++) game_reset(game_tmp+r);
    }

    printf("Win: %d  lose: %d\n", num_win, num_lose);
    return num_win * 100.0 / (num_win  + num_lose);
}

/*
 * Main training entry point: builds the student net, an inference/teacher
 * copy (net_inf, conv weights zeroed then mean-teacher-synced each iter),
 * and a fixed reference net for evaluation. Runs train_single_iter until
 * max_batches, evaluating every 100 iterations and checkpointing every 1000.
 */
void train_gomoku(char *cfgfile, char *weightfile)
{
    srand(time(0));
    float avg_loss = -1;
    float avg_time = -1;
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    network net = parse_network_cfg(cfgfile);
    network net_inf = parse_network_cfg_custom(cfgfile, net.round, 0);
    network net_ref = parse_network_cfg_custom("cfg/go_ref.cfg", net.round, 0);
    load_weights(&net_ref, "backup/go_ref.w");
    // Zero-init the teacher's conv layers (weights/biases to 0, batch-norm
    // rolling stats to identity) so mean-teacher sync starts from scratch.
    for (int j = 0; j < net_inf.n; ++j) {
        layer l = net_inf.layers[j];
        if(l.type == CONVOLUTIONAL){
            scal_ongpu(l.nweights, 0, l.weights_gpu, 1);
            scal_ongpu(l.n, 0, l.biases_gpu, 1);
            if (l.batch_normalize){
                scal_add_ongpu(l.n, 0, 1, l.rolling_mean_gpu, 1);
                scal_add_ongpu(l.n, 0, 1, l.rolling_variance_gpu, 1);
            }
        }
    }

    if(weightfile){
        set_batch_network(&net, 1);
        load_weights(&net, weightfile);
        load_weights(&net_inf, weightfile);
        // *net.cur_iteration = 0;
    }
    // Weight init

    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);

#ifdef OPENCV
    // Chart
    mat_cv* img = NULL;
    float max_img_loss = 1;
    int number_of_lines = 100;
    int img_size = 1000;
    char windows_name[100];
    sprintf(windows_name, "chart_%s.png", base);
    img = draw_train_chart(windows_name, max_img_loss, net.max_batches, number_of_lines, img_size, 1, NULL);
#endif
    Game game;
    game_init(&game, &net);
    double time_last_draw=0;
    float best_win = -1;
    while(*net.cur_iteration < net.max_batches){

        double time=what_time_is_it_now();
        float loss = train_single_iter(&net, &net_inf, &game);
        if(avg_loss == -1) avg_loss = loss;
        if(avg_time == -1) avg_time = what_time_is_it_now()-time;
        float time_remaining = (net.max_batches - *net.cur_iteration) * avg_time / 60 / 60;

        (*net.cur_iteration) += 1;
        printf("%d, %3f, %f avg, %f rate, %lf seconds, eta: %fh\n", *net.cur_iteration, loss, avg_loss, get_current_rate(net), avg_time, time_remaining);
        if(*net.cur_iteration % 100 == 0){
            float num_win = self_gomoku_loop(&net, &net_ref, &game);
#ifdef OPENCV
            draw_train_loss(windows_name, img, img_size, avg_loss, max_img_loss, *net.cur_iteration, net.max_batches, num_win/100, 1, "acc", 1, 6006, time_remaining);
#endif
        }
        if(*net.cur_iteration % 1000 == 0){
            char buff[256];
            sprintf(buff, "backup/%s_%08d.backup", base, *net.cur_iteration);
            set_batch_network(&net, 1);
            save_weights(net, buff);
            sprintf(buff, "backup/%s_%08d.backup.stu", base, *net.cur_iteration);
            save_weights(net_inf, buff);
        }
#ifdef OPENCV
        // Redraw the chart at most once per second.
        if(what_time_is_it_now() - time_last_draw > 1){
            time_last_draw = what_time_is_it_now();
            draw_train_loss(windows_name, img, img_size, avg_loss, max_img_loss, *net.cur_iteration, net.max_batches, 0, 0, "acc", 1, 6006, time_remaining);
        }
#endif
        // Exponential moving averages of loss and wall time per iteration.
        avg_loss = avg_loss*.99 + loss*.01;
        avg_time = avg_time*.99 + (what_time_is_it_now() - time)*.01;
    }
    free_network(net);
    free_network(net_inf);   // BUGFIX: net_inf was never freed (leak)
    free_game(&game);
    free_network(net_ref);
    free(base);
}

/*
 * Pit two weight sets of the same cfg against each other via
 * self_gomoku_loop and report the result.
 * Both weight paths are required (asserted).
 */
void self_gomoku(char *cfgfile, char *weightfile, char *weights_ref)
{
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);

    // Both contenders use the same cfg, batch 1.
    network net = parse_network_cfg_custom(cfgfile, 1, 0);
    network net_ref = parse_network_cfg_custom(cfgfile, 1, 0);
    assert(weightfile);
    load_weights(&net, weightfile);
    assert(weights_ref);
    load_weights(&net_ref, weights_ref);
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n",
           net.learning_rate, net.momentum, net.decay);

    Game game;
    game_init(&game, &net);

    self_gomoku_loop(&net, &net_ref, &game);

    free_network(net);
    free_network(net_ref);
    free_game(&game);
    free(base);
}

/*
 * Interactive game: the network plays one side, a human enters moves as
 * "row col" on stdin for the other. Loops until the game ends, then prints
 * the winner index.
 */
void play_gomoku(char *cfgfile, char *weightfile)
{
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    network net = parse_network_cfg_custom(cfgfile, 1, 0);
    assert(weightfile);
    load_weights(&net, weightfile);
    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);

    Game game;
    game_init(&game, &net);

    int net_play = 0;
    while (game_end(&game)==3){
        if ((!game.cur_node->player) == net_play){
            game_place(&game, player_net(&net, &game));
        } else {
            int x, y;
            print_grid_char(game.board_c, game.w, game.h);
            do
            {
                printf("Choose a pos:\n");
                // BUGFIX: scanf result was ignored (infinite loop on bad
                // input) and x/y were unchecked before indexing board_c
                // (out-of-bounds on out-of-range input).
                if (scanf("%d %d", &y, &x) != 2){
                    int c;
                    while ((c = getchar()) != '\n' && c != EOF);  // discard bad line
                    x = y = -1;  // force a retry
                    continue;
                }
            } while (x < 0 || x >= game.w || y < 0 || y >= game.h
                     || game.board_c[x + y * game.w]);
            game_place(&game, x + y * game.w);
        }
    }
    int winner = game_end(&game);

    printf("\nWinner is : %d\n", winner);  // BUGFIX: was "Winenr"

    free_network(net);
    free_game(&game);
    free(base);
}

/*
 * Command dispatcher: argv[2] selects train/self/play, argv[3] is the cfg,
 * argv[4]/argv[5] are optional weight files.
 */
void run_gomoku(int argc, char **argv)
{
    //boards_gomoku();
    if(argc < 4){
        // BUGFIX: usage omitted the supported "play" command.
        fprintf(stderr, "usage: %s %s [train/self/play] [cfg] [weights (optional) [weights_reference (optional)]]\n", argv[0], argv[1]);
        return;
    }

    char *cfg = argv[3];
    char *weights = (argc > 4) ? argv[4] : 0;
    char *weights_ref = (argc > 5) ? argv[5] : 0;
    // kept: find_arg strips "-multi" from argv; flag used by commented-out commands
    int multi = find_arg(argc, argv, "-multi");
    if(0==strcmp(argv[2], "train")) train_gomoku(cfg, weights);
    // else if(0==strcmp(argv[2], "valid")) valid_gomoku(cfg, weights, multi);
    else if(0==strcmp(argv[2], "self")) self_gomoku(cfg, weights, weights_ref);
    else if(0==strcmp(argv[2], "play")) play_gomoku(cfg, weights);
    // else if(0==strcmp(argv[2], "test")) test_gomoku(cfg, weights, multi);
    // else if(0==strcmp(argv[2], "engine")) engine_gomoku(cfg, weights, multi);
}
