# -*- coding: utf-8 -*-

from __future__ import print_function
import random
import numpy as np
import torch
import h5py
import threading
import time

from collections import defaultdict, deque
from game_sparse import Board_sparse, Game_sparse
# from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_sparse_root import MCTSPlayer_root
from policy_value_net_sparse import PolicyValueNet_sparse  # Pytorch

from data import Voxel_trainSet, collate_fn
from torch.utils.data import DataLoader
from data import Dataset_Buffer, collate_buffer_fn


import os
os.environ["CUDA_VISIBLE_DEVICES"]= '0'


def setup_seed(seed):
    """Seed every RNG used in this pipeline (stdlib, numpy, torch CPU and
    torch CUDA) so that self-play and training runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


# matrix_mean_gflops = np.zeros(32, dtype = float)
# matrix_select_time = np.zeros(32, dtype = int)

class TrainPipeline():
    """Self-play + training loop for MCTS-guided sparse-format selection.

    Creates one (board, game, MCTS player) triple per worker thread, all
    sharing a single PolicyValueNet_sparse.  Self-play episodes produce
    (state, mcts_probs, mcts_value_probs, winner, move) tuples that are
    pushed into a bounded replay buffer (guarded by the module-level
    ``threadLock`` created in ``__main__``); the network is periodically
    updated from random mini-batches with a KL-based adaptive learning rate.
    """

    def __init__(self, init_model=None):
        """Build boards, games, the policy-value net and the MCTS players.

        Args:
            init_model: optional path to a saved model file to resume from.
        """
        self.threads_num = 3

        # Board geometry: 32 columns x 3 rows, 3-in-a-row end condition
        # (values inherited from the original Gomoku-style pipeline).
        self.board_width = 32
        self.board_height = 3
        self.n_in_row = 3

        # One board/game pair per worker so self-play games can run
        # concurrently without sharing mutable board state.
        self.board = []
        for i in range(self.threads_num):
            board = Board_sparse(width=self.board_width,
                            height=self.board_height,
                            n_in_row=self.n_in_row,
                            rank=i,
                            world_size=2)
            self.board.append(board)

        self.game = [Game_sparse(b) for b in self.board]

        # --- training hyper-parameters -------------------------------------
        self.learn_rate = 1e-5
        self.lr_multiplier = 1.0  # adaptively adjusted from the observed KL
        self.temp = 1.0           # MCTS move-sampling temperature

        self.n_playout = 50       # MCTS simulations per move

        self.c_puct = 3

        self.buffer_size = 200
        self.batch_size = 8       # mini-batch size for each train step
        self.data_buffer = deque(maxlen=self.buffer_size)
        self.play_batch_size = 1
        self.epochs = 5           # gradient steps per policy_update() call
        self.kl_targ = 0.02
        self.check_freq = 30      # checkpoint every N self-play batches
        self.game_batch_num = 1500
        self.best_win_ratio = 0.0
        # num of simulations for the pure-MCTS evaluation opponent
        # (evaluation is currently disabled in run()).
        self.pure_mcts_playout_num = 1000

        if init_model:
            # resume training from an existing policy-value net checkpoint
            self.policy_value_net = PolicyValueNet_sparse(self.board_width,
                                                   self.board_height,
                                                   model_file=init_model)
        else:
            # start training from a freshly initialized policy-value net
            self.policy_value_net = PolicyValueNet_sparse(self.board_width,
                                                   self.board_height)

        # One self-play MCTS player per worker; all share the same network.
        self.mcts_player = []
        for i in range(self.threads_num):
            mcts_player = MCTSPlayer_root(self.policy_value_net.policy_value_fn,
                                    c_puct=self.c_puct,
                                    n_playout=self.n_playout,
                                    is_selfplay=1)
            self.mcts_player.append(mcts_player)

    def collect_selfplay_data(self, sparse_name, sparse_matrix, mean_gflops, std_gflops, rank, n_games=1, ):
        """Run ``n_games`` self-play games on worker ``rank`` and push the
        resulting samples into the shared replay buffer.

        Args:
            sparse_name: name of the sparse matrix being optimized.
            sparse_matrix: the matrix sample (feature buffers) fed to the game.
            mean_gflops: mean throughput used to normalize rewards.
            std_gflops: throughput std-dev used to normalize rewards.
            rank: worker index selecting the board/game/player instance.
            n_games: number of self-play games to collect.

        Side effects:
            Sets ``self.episode_len`` and extends ``self.data_buffer`` under
            the module-level ``threadLock``.
        """
        for _ in range(n_games):
            play_data, parameters, init_gflops, last_gflops = self.game[rank].start_self_play(self.mcts_player[rank],
                                                                sparse_name,
                                                                sparse_matrix,
                                                                mean_gflops,
                                                                std_gflops,
                                                                temp=self.temp)
            torch.cuda.empty_cache()
            play_data = list(play_data)[:]

            print('#parameters: ', sparse_name, " ", parameters[0], " ", parameters[1], " ", parameters[2],
                    " #initial:", init_gflops, " #final:", last_gflops)

            self.episode_len = len(play_data)

            # Down-sample clearly bad episodes: when the final throughput
            # dropped more than 0.4 (normalized by the board's std) below the
            # initial one, keep the episode only 10% of the time.  Buffer
            # access is serialized with the module-level lock; `with` makes
            # the release exception-safe (the original acquire/release pair
            # would leave the lock held if extend() raised).
            normalized_gflops = self.game[rank].board.std_gflops
            if (last_gflops - init_gflops) / normalized_gflops < -0.4:
                if random.random() < 0.1:
                    with threadLock:
                        self.data_buffer.extend(play_data)
            else:
                with threadLock:
                    self.data_buffer.extend(play_data)

    def policy_update(self):
        """Run up to ``self.epochs`` gradient steps on one random mini-batch.

        Early-stops when the total KL divergence (policy + selected value
        head) between pre- and post-update network outputs exceeds
        4 * kl_targ, then nudges the learning-rate multiplier toward the
        KL target.

        Returns:
            (loss, entropy) from the last executed training step.
        """
        mini_batch = random.sample(self.data_buffer, self.batch_size)
        # Each buffer entry: (state, mcts_probs, mcts_value_probs, winner, move).
        state_batch = [data[0] for data in mini_batch]
        mcts_probs_batch = [data[1] for data in mini_batch]
        mcts_value_probs_batch = [data[2] for data in mini_batch]
        winner_batch = [data[3] for data in mini_batch]
        move_batch = [data[4] for data in mini_batch]

        old_policy_prob, old_policy_value, old_v = self.policy_value_net.policy_value(state_batch)

        # old_policy_value is a sequence of per-head arrays; stack them to
        # (batch, heads, dim) then flatten to (batch*heads, dim) so a flat
        # index (sample*3 + move) selects the head used for each sample.
        old_policy_value = np.stack(old_policy_value, axis=1)
        old_policy_value = old_policy_value.reshape(-1, old_policy_value.shape[-1])
        torch.cuda.empty_cache()

        for i in range(self.epochs):
            value_loss, policy_loss, policy_value_loss, entropy = self.policy_value_net.train_step(
                    state_batch,
                    mcts_probs_batch,
                    mcts_value_probs_batch,
                    winner_batch,
                    move_batch,
                    self.learn_rate*self.lr_multiplier)
            loss = value_loss + policy_loss + policy_value_loss
            new_policy_prob, new_policy_value, new_v = self.policy_value_net.policy_value(state_batch)

            # KL between the old and new move-selection distributions.
            kl_policy = np.mean(np.sum(old_policy_prob * (
                    np.log(old_policy_prob + 1e-10) - np.log(new_policy_prob + 1e-10)),
                    axis=1)
            )

            new_policy_value = np.stack(new_policy_value, axis=1)
            new_policy_value = new_policy_value.reshape(-1, new_policy_value.shape[-1])

            # Per sample, pick the value head matching the move actually taken
            # (3 heads per sample after the reshape above).
            policy_selection = np.array(move_batch)
            batch_accumulate_base = np.arange(policy_selection.shape[0]) * 3
            policy_selection_index = batch_accumulate_base + policy_selection

            kl_policy_value = np.mean(np.sum(old_policy_value[policy_selection_index] * (
                    np.log(old_policy_value[policy_selection_index] + 1e-10) - np.log(new_policy_value[policy_selection_index] + 1e-10)),
                    axis=1)
            )

            kl = kl_policy + kl_policy_value
            torch.cuda.empty_cache()

            if kl > self.kl_targ * 4:  # early stopping if D_KL diverges badly
                break

        # Adaptively adjust the learning rate toward the KL target.
        if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
            self.lr_multiplier /= 1.5
        elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
            self.lr_multiplier *= 1.5

        # (typo fixed: "policu_value_loss" -> "policy_value_loss")
        print((
               "lr_multiplier:{:.3f},  "
               "loss:{:.3f},  "
               "value_loss:{:.3f},  "
               "policy_loss:{:.3f},  "
               "policy_value_loss:{:.3f},  "
               ).format(
                        self.lr_multiplier,
                        loss,
                        value_loss,
                        policy_loss,
                        policy_value_loss,
                        ))
        return loss, entropy

    def run(self):
        """Run the full training pipeline: load the matrix list and voxel
        dataset, then alternate self-play collection, network updates and
        periodic checkpointing until interrupted or game_batch_num batches."""

        # Read candidate matrix names; `with` guarantees the handle is closed
        # (the original called `f.close` without parentheses, so the file was
        # never actually closed).
        with open('sample_data_200.txt') as f:
            sparse_name_list = [line.strip('\n') for line in f]

        # Keep 4 matrices from each of 4 regions of the sample file.
        selected_ids = [0, 1, 2, 3,
                        50, 51, 52, 53,
                        100, 101, 102, 103,
                        150, 151, 152, 153]
        sparse_name_list = [sparse_name_list[i] for i in selected_ids]

        print("#matrix size: ", len(sparse_name_list))
        print(sparse_name_list)

        # Per-group throughput statistics used to normalize rewards.
        # NOTE(review): these look precomputed offline for three matrix
        # groups — confirm against the data-generation scripts.
        mean_gflops_list = [12.0, 26.90, 60.81, ]
        std_gflops_list = [3.28, 27.72, 37.76, ]

        voxel_datafn = '../data/16_voxel.h5py'
        print('#loading dataset', voxel_datafn, '...')
        # Context manager closes the HDF5 file even if a read raises.
        with h5py.File(voxel_datafn, 'r') as cache:
            csr_store_length = cache['csr_store_length'][()]
            feature_buffer_all = cache['feature_buffer'][()]
            coordinate_buffer_all = cache['coordinate_buffer'][()]
            number_buffer_all = cache['number_buffer'][()]
            density_buffer_all = cache['density_buffer'][()]
            nnz_number_all = cache['nnz_number'][()]

        trainset = Voxel_trainSet(csr_store_length, feature_buffer_all, coordinate_buffer_all, number_buffer_all,
                                density_buffer_all, nnz_number_all, sparse_name_list)

        try:
            for i in range(self.game_batch_num):
                # Single-threaded self-play on matrix index 8 with the
                # statistics of GFLOPS group 2 (the multi-threaded variant
                # was disabled in the original).
                self.collect_selfplay_data(trainset[8]['sparse_name'], trainset[8],
                                           mean_gflops_list[2], std_gflops_list[2],
                                           0, self.play_batch_size)

                print("batch i:{}, episode_len:{}, data_buffer:{}".format(i+1, self.episode_len, len(self.data_buffer)))
                if len(self.data_buffer) > self.batch_size:
                    loss, entropy = self.policy_update()
                    torch.cuda.empty_cache()

                # Periodically checkpoint the current policy
                # (win-ratio evaluation against pure MCTS is disabled).
                if (i+1) % self.check_freq == 0:
                    print("current self-play batch: {}".format(i+1))
                    self.policy_value_net.save_model(f'../chk/current_policy_{str(i+1).zfill(3)}_c{self.c_puct}_n{self.n_playout}_di08.model')
        except KeyboardInterrupt:
            print('\n\rquit')


if __name__ == '__main__':
    # Fix all RNG seeds (stdlib, numpy, torch) for reproducible runs.
    setup_seed(20230422)
    # NOTE(review): appears unused in this file — possibly read as a global
    # by other modules of the pipeline; confirm before removing.
    batch_iterate_times = 1
    # Module-level lock guarding concurrent extends of the shared replay
    # buffer in TrainPipeline.collect_selfplay_data.
    threadLock = threading.Lock()
    # model_path = f'./chk/previous/current_policy_300_c5_buffer200_di08.model'
    # training_pipeline = TrainPipeline(init_model=model_path)
    
    training_pipeline = TrainPipeline()
    training_pipeline.run()
