# -*- coding: utf-8 -*-
# nohup python -u train_sparsenet_muldata.py > nohup.out 2>&1 &

from __future__ import print_function
import random
import numpy as np
import torch
import h5py
import threading
import time

from collections import defaultdict, deque
from game_sparse import Board_sparse, Game_sparse
# from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_sparse_root import MCTSPlayer_root
from policy_value_net_sparse import PolicyValueNet_sparse  # Pytorch

from data import Voxel_trainSet, collate_fn
from torch.utils.data import DataLoader
from data import Dataset_Buffer_load, collate_buffer_fn_load

from constant import priority_binary_buffer_insert

import os
os.environ["CUDA_VISIBLE_DEVICES"]= '0'


def setup_seed(seed):
    """Seed every RNG in use (random, numpy, torch CPU and all CUDA devices)
    so that a run is reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


# matrix_mean_gflops = np.zeros(32, dtype = float)
# matrix_select_time = np.zeros(32, dtype = int)

class TrainPipeline():
    def __init__(self, data_number, init_model=None, init_data=None):
        """Build the training pipeline: one board/game pair per worker
        thread, one priority replay buffer per matrix, the shared
        policy-value network, and one self-play MCTS player per thread.

        Args:
            data_number: number of sparse matrices (one replay buffer each).
            init_model: optional checkpoint path to resume the net from.
            init_data: optional saved replay-buffer file to resume from.
        """
        self.threads_num = 4
        self.data_number = data_number

        # board geometry
        self.board_width = 32
        self.board_height = 3
        self.n_in_row = 3

        # one board + game wrapper per self-play worker thread
        self.board = [Board_sparse(width=self.board_width,
                                   height=self.board_height,
                                   n_in_row=self.n_in_row,
                                   rank=rank,
                                   world_size=2)
                      for rank in range(self.threads_num)]
        self.game = [Game_sparse(board) for board in self.board]

        # training hyper-parameters
        self.learn_rate = 1e-5
        self.lr_multiplier = 1.0  # adapted on the fly from the measured KL
        self.temp = 1.0           # MCTS temperature parameter
        self.n_playout = 200      # simulations per move
        self.c_puct = 3
        self.buffer_size = 50     # capacity of each per-matrix buffer
        self.batch_size = 16      # mini-batch size for training

        # one priority replay buffer per matrix
        self.data_buffer = [[] for _ in range(self.data_number)]

        self.start_itera = 0
        if init_data:
            self.load_data(init_data)

        self.play_batch_size = 1
        self.epochs = 5       # gradient steps per sampled batch
        self.kl_targ = 0.02
        self.check_freq = 5
        self.game_batch_num = 1500
        self.best_win_ratio = 0.0
        # num of simulations used for the pure mcts, which is used as
        # the opponent to evaluate the trained policy
        self.pure_mcts_playout_num = 1000

        if init_model:
            # resume training from an existing policy-value net checkpoint
            self.policy_value_net = PolicyValueNet_sparse(self.board_width,
                                                          self.board_height,
                                                          model_file=init_model)
        else:
            # start training from a freshly initialised policy-value net
            self.policy_value_net = PolicyValueNet_sparse(self.board_width,
                                                          self.board_height)

        # one self-play MCTS player per thread, all sharing the same net
        self.mcts_player = [MCTSPlayer_root(self.policy_value_net.policy_value_fn,
                                            c_puct=self.c_puct,
                                            n_playout=self.n_playout,
                                            _is_self_play=1)
                            for _ in range(self.threads_num)]

        print('#data_buffer_each_data: ', self.buffer_size)
        print('#c_puct: ', self.c_puct)
        print('#n_simu: ', self.n_playout)

    # def get_equi_data(self, play_data):
    #     """
    #     Augment the data set by rotation and flipping
    #     play_data: [(state, mcts_prob, winner_z), ..., ...]
    #     """
    #     extend_data = []
    #     for state, mcts_porb, winner in play_data:
    #         for i in [1, 2, 3, 4]:
    #             # rotate counterclockwise
    #             equi_state = np.array([np.rot90(s, i) for s in state])
    #             equi_mcts_prob = np.rot90(np.flipud(
    #                 mcts_porb.reshape(self.board_height, self.board_width)), i)
    #             extend_data.append((equi_state,
    #                                 np.flipud(equi_mcts_prob).flatten(),
    #                                 winner))
    #             # flip horizontally
    #             equi_state = np.array([np.fliplr(s) for s in equi_state])
    #             equi_mcts_prob = np.fliplr(equi_mcts_prob)
    #             extend_data.append((equi_state,
    #                                 np.flipud(equi_mcts_prob).flatten(),
    #                                 winner))
    #     return extend_data

    # def collect_selfplay_data(self, sparse_name, sparse_matrix, mean_gflops, std_gflops, rank, n_games=1, ):
    #     """collect self-play data for training"""
        
    #     for i in range(n_games):
    #         # winner, play_data = self.game.start_self_play(self.mcts_player,
    #         #                                               temp=self.temp)
            
    #         play_data, parameters, init_gflops, last_gflops = self.game[rank].start_self_play(self.mcts_player[rank],
    #                                                             sparse_name,
    #                                                             sparse_matrix,
    #                                                             mean_gflops,
    #                                                             std_gflops,
    #                                                             temp=self.temp)
    #         torch.cuda.empty_cache()
    #         play_data = list(play_data)[:]
            
    #         # for j in range(len(play_data)):
    #             # print(play_data[j][3])
    #         # print('ss')
    #         # input()
            
    #         print('#parameters: ', sparse_name, " ", parameters[0], " ", parameters[1], " ", parameters[2], 
    #                 " #initial:", init_gflops, " #final:", last_gflops)
            
    #         self.episode_len = len(play_data)
    #         # augment the data
    #         # play_data = self.get_equi_data(play_data)
    #         normalized_gflops = self.game[rank].board.std_gflops
    #         if ( (last_gflops - init_gflops) / normalized_gflops < -0.4 ):
    #             if (random.random() < 0.1):
    #                 threadLock.acquire()
    #                 self.data_buffer.extend(play_data)
    #                 threadLock.release()
    #         else:
    #             threadLock.acquire()
    #             self.data_buffer.extend(play_data)
    #             threadLock.release()
            
            
            
    #-----
    
    def collect_selfplay_data_databuffer(self, sparse_name, sparse_matrix, mean_gflops, std_gflops, idx, rank, n_games=1, ):
        """Run self-play episodes on one sparse matrix and insert each episode
        into that matrix's priority replay buffer.

        Runs as a worker-thread target (see run()); each thread drives its own
        self.game[rank] / self.mcts_player[rank] pair and writes only to
        self.data_buffer[idx], so no lock is taken around the buffer insert.

        Args:
            sparse_name: identifier of the sparse matrix being tuned.
            sparse_matrix: the matrix sample handed to the game
                (presumably the per-item dict produced by Voxel_trainSet —
                verify against start_self_play).
            mean_gflops: mean GFLOPS used for reward normalisation.
            std_gflops: std-dev GFLOPS used for reward normalisation.
            idx: slot of this matrix's buffer in self.data_buffer.
            rank: worker-thread index selecting board/game/player.
            n_games: number of self-play episodes to run.
        """
        
        for i in range(n_games):
            # winner, play_data = self.game.start_self_play(self.mcts_player,
            #                                               temp=self.temp)
            play_data, parameters, init_gflops, last_gflops = self.game[rank].start_self_play(self.mcts_player[rank],
                                                                sparse_name,
                                                                sparse_matrix,
                                                                mean_gflops,
                                                                std_gflops,
                                                                temp=self.temp)
            torch.cuda.empty_cache()
            play_data = list(play_data)[:]
            
            # for j in range(len(play_data)):
                # print(play_data[j][3])
            # print('ss')
            # input()
            # zip(states, mcts_probs, mcts_value_probs, future_reward_list, actual_move, priority_compare_list)
            # Re-pack each sample: keep only x[0][1] of the state pair (x[0]
            # appears to be a 2-tuple whose second element is the state used
            # for training — confirm) and append the buffer index `idx` so the
            # sample can be traced back to its matrix after buffers are pooled.
            play_data = [[x[0][1], x[1], x[2], x[3], x[4], x[5], idx]  for x in play_data]
            
            print('#parameters: ', sparse_name, " ", parameters[0], " ", parameters[1], " ", parameters[2], 
                    " #initial:", init_gflops, " #final:", last_gflops)
            
            self.episode_len = len(play_data)
            # augment the data
            # play_data = self.get_equi_data(play_data)
            
            # normalized_gflops = self.game[rank].board.std_gflops
            # if ( (last_gflops - init_gflops) / normalized_gflops < -0.4 ):
            #     if (random.random() < 0.1):
            #         threadLock.acquire()
            #         self.data_buffer.extend(play_data)
            #         threadLock.release()
            # else:
            #     threadLock.acquire()
            #     self.data_buffer.extend(play_data)
            #     threadLock.release()
            
            # Normalised GFLOPS improvement of the whole episode — used as the
            # priority key when inserting into the buffer.
            compare_tmp = (self.game[rank].board.last_gflops - self.game[rank].board.init_gflops) / self.game[rank].board.std_gflops
            # NOTE(review): exact float equality with the per-sample priority
            # stored at position 5 — holds only while both sides are computed
            # from the same board fields; stripped under `python -O`.
            assert compare_tmp == play_data[0][5]
            
            # priority_binary_buffer_insert returns the (possibly truncated)
            # buffer kept sorted by priority, capped at self.buffer_size.
            self.data_buffer[idx] = priority_binary_buffer_insert(self.data_buffer[idx], compare_tmp, play_data, self.buffer_size)
            
            # threadLock.acquire()
            '''
            if (len(self.data_buffer[rank]) == 0):
                priority_binary_buffer_insert(self.data_buffer[rank], compare_tmp, play_data, self.buffer_size)
            # threadLock.release()
            
            else:           
                normalized_gflops = self.game[rank].board.std_gflops
                if ( (last_gflops - init_gflops) / normalized_gflops < -0.4 ):
                    if (random.random() < 0.1):
                        # threadLock.acquire()
                        # self.data_buffer.extend(play_data)
                        # threadLock.release()
                        priority_binary_buffer_insert(self.data_buffer[rank], compare_tmp, play_data, self.buffer_size)
                else:
                    priority_binary_buffer_insert(self.data_buffer[rank], compare_tmp, play_data, self.buffer_size)
            '''
    
    #-----
    
    def policy_update_databuffer(self):
        """Update the policy-value net from the pooled replay buffers.

        Concatenates every per-matrix buffer into one dataset, iterates over
        shuffled mini-batches and performs up to self.epochs gradient steps
        per batch, early-stopping a batch when the KL divergence between the
        pre-update and post-update predictions diverges badly.  The learning
        rate multiplier is adapted from the same KL signal.

        Returns:
            (loss, entropy) from the last train_step performed.

        Raises:
            ValueError: if every replay buffer is empty.  (The original code
                fell through to an unbound-name NameError on `loss` here.)
        """
        # Pool all per-matrix buffers into one training set.
        data_buffer_all = []
        for buf in self.data_buffer:
            data_buffer_all += buf
        if not data_buffer_all:
            raise ValueError('policy_update_databuffer: all replay buffers are empty')

        dataset_buffer = Dataset_Buffer_load(data_buffer_all, self.data_buffer_matrix)
        dataload_buffer = DataLoader(dataset_buffer, shuffle=True,
                                     batch_size=self.batch_size,
                                     collate_fn=collate_buffer_fn_load)

        for batch_data in dataload_buffer:
            # Batch layout produced by collate_buffer_fn_load:
            # (states, mcts_probs, mcts_value_probs, rewards, moves, ...).
            state_batch = batch_data[0]
            mcts_probs_batch = batch_data[1]
            mcts_value_probs_batch = batch_data[2]
            winner_batch = batch_data[3]
            move_batch = batch_data[4]

            # Snapshot the net's predictions before any update so the KL
            # drift caused by this batch can be measured after each step.
            old_policy_prob, old_policy_value, old_v = self.policy_value_net.policy_value(state_batch)
            # Stack the per-move value heads and flatten to (batch*3, n) so a
            # flat row index can pick the head of each sample's actual move.
            old_policy_value = np.stack(old_policy_value, axis=1)
            old_policy_value = old_policy_value.reshape(-1, old_policy_value.shape[-1])
            torch.cuda.empty_cache()

            # Row index of each sample's chosen head after the flatten above:
            # sample i, move m lives at row i*3 + m.  Loop-invariant per batch,
            # so compute it once instead of once per epoch (was recomputed).
            policy_selection = np.array(move_batch)
            policy_selection_index = np.arange(policy_selection.shape[0]) * 3 + policy_selection

            for _ in range(self.epochs):
                value_loss, policy_loss, policy_value_loss, entropy = self.policy_value_net.train_step(
                        state_batch,
                        mcts_probs_batch,
                        mcts_value_probs_batch,
                        winner_batch,
                        move_batch,
                        self.learn_rate * self.lr_multiplier)
                loss = value_loss + policy_loss + policy_value_loss

                new_policy_prob, new_policy_value, new_v = self.policy_value_net.policy_value(state_batch)

                # KL divergence of the move-probability head.
                kl_policy = np.mean(np.sum(old_policy_prob * (
                        np.log(old_policy_prob + 1e-10) - np.log(new_policy_prob + 1e-10)),
                        axis=1)
                )

                new_policy_value = np.stack(new_policy_value, axis=1)
                new_policy_value = new_policy_value.reshape(-1, new_policy_value.shape[-1])

                # KL divergence of the value-probability head, restricted to
                # the head belonging to the move actually played per sample.
                kl_policy_value = np.mean(np.sum(old_policy_value[policy_selection_index] * (
                        np.log(old_policy_value[policy_selection_index] + 1e-10)
                        - np.log(new_policy_value[policy_selection_index] + 1e-10)),
                        axis=1)
                )

                kl = kl_policy + kl_policy_value
                torch.cuda.empty_cache()

                if kl > self.kl_targ * 4:  # early stopping if D_KL diverges badly
                    break

            # Adaptively adjust the learning rate from this batch's final KL.
            if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
                self.lr_multiplier /= 1.5
            elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
                self.lr_multiplier *= 1.5

            # NOTE: the old explained-variance diagnostics were computed here
            # but never used (and could divide by zero); they were removed.
            print((
                "lr_multiplier:{:.3f},  "
                "loss:{:.3f},  "
                "value_loss:{:.3f},  "
                "policy_loss:{:.3f},  "
                "policy_value_loss:{:.3f},  "  # fixed label typo ("policu")
                ).format(
                            self.lr_multiplier,
                            loss,
                            value_loss,
                            policy_loss,
                            policy_value_loss,
                            ))
        return loss, entropy

            
    # def policy_update(self):
    #     """update the policy-value net"""
    #     mini_batch = random.sample(self.data_buffer, self.batch_size)
    #     state_batch = [data[0] for data in mini_batch]
    #     mcts_probs_batch = [data[1] for data in mini_batch]
    #     mcts_value_probs_batch = [data[2] for data in mini_batch]
    #     winner_batch = [data[3] for data in mini_batch]
    #     move_batch = [data[4] for data in mini_batch]
        
    #     # old_probs, old_v = self.policy_value_net.policy_value(state_batch)
    #     # old_policy_prob, (old_policy_value1, old_policy_value2, old_policy_value3), old_v = self.policy_value_net.policy_value(state_batch)
    #     old_policy_prob, old_policy_value, old_v = self.policy_value_net.policy_value(state_batch)
        
    #     # print(old_policy_value.shape)
    #     old_policy_value = np.stack(old_policy_value, axis=1)
    #     old_policy_value = old_policy_value.reshape(-1, old_policy_value.shape[-1])
    #     torch.cuda.empty_cache()
        
    #     for i in range(self.epochs):
    #         value_loss, policy_loss, policy_value_loss, entropy = self.policy_value_net.train_step(
    #                 state_batch,
    #                 mcts_probs_batch,
    #                 mcts_value_probs_batch,
    #                 winner_batch,
    #                 move_batch,
    #                 self.learn_rate*self.lr_multiplier)
    #         loss = value_loss + policy_loss + policy_value_loss
    #         # new_probs, new_v = self.policy_value_net.policy_value(state_batch)
    #         # new_policy_prob, (new_policy_value1, new_policy_value2, new_policy_value3), new_v = self.policy_value_net.policy_value(state_batch)
    #         new_policy_prob, new_policy_value, new_v = self.policy_value_net.policy_value(state_batch)
            
    #         kl_policy = np.mean(np.sum(old_policy_prob * (
    #                 np.log(old_policy_prob + 1e-10) - np.log(new_policy_prob + 1e-10)),
    #                 axis=1)
    #         )
            
    #         # old_selection = np.argmax(old_policy_prob, axis=-1)
    #         # new_selection = np.argmax(new_policy_prob, axis=-1)
            
    #         # kl_policy_value = np.mean(np.sum(old_policy_value[old_selection] * (
    #         #         np.log(old_policy_value[old_selection] + 1e-10) - np.log(new_policy_value[new_selection] + 1e-10)),
    #         #         axis=1)
    #         # )
            
    #         new_policy_value = np.stack(new_policy_value, axis=1)
    #         new_policy_value = new_policy_value.reshape(-1, new_policy_value.shape[-1])
            
    #         policy_selection = np.array(move_batch)
    #         batch_accumulate_base = np.arange(policy_selection.shape[0]) * 3
    #         policy_selection_index = batch_accumulate_base + policy_selection
            
    #         kl_policy_value = np.mean(np.sum(old_policy_value[policy_selection_index] * (
    #                 np.log(old_policy_value[policy_selection_index] + 1e-10) - np.log(new_policy_value[policy_selection_index] + 1e-10)),
    #                 axis=1)
    #         )
            
    #         kl = kl_policy + kl_policy_value
    #         torch.cuda.empty_cache()
            
            
    #         if kl > self.kl_targ * 4:  # early stopping if D_KL diverges badly
    #             break
    #     # adaptively adjust the learning rate
    #     if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
    #         self.lr_multiplier /= 1.5
    #     elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
    #         self.lr_multiplier *= 1.5

    #     explained_var_old = (1 -
    #                          np.var(np.array(winner_batch) - old_v.flatten()) /
    #                          np.var(np.array(winner_batch)))
    #     explained_var_new = (1 -
    #                          np.var(np.array(winner_batch) - new_v.flatten()) /
    #                          np.var(np.array(winner_batch)))
    #     print((
    #         #     "kl:{:.5f},  "
    #         #    "kl_policy: {:.5f}  "
    #         #    "kl_p_value: {:.5f}  "
    #            "lr_multiplier:{:.3f},  "
    #            "loss:{:.3f},  "
    #            "value_loss:{:.3f},  "
    #            "policy_loss:{:.3f},  "
    #            "policu_value_loss:{:.3f},  "
    #         #    "entropy:{:.3f},  "
    #         #    "explained_var_old:{:.3f},  "
    #         #    "explained_var_new:{:.3f} "
    #            ).format(
    #                     # kl,
    #                     # kl_policy,
    #                     # kl_policy_value,
    #                     self.lr_multiplier,
    #                     loss,
    #                     value_loss,
    #                     policy_loss,
    #                     policy_value_loss,
    #                     # entropy,
    #                     # explained_var_old,
    #                     # explained_var_new
    #                     ))
    #     return loss, entropy


    def save_data(self, data_file, save_itera):
        """Serialise every replay buffer plus the iteration counter to
        `data_file` via torch.save, in the column layout load_data expects."""
        assert self.data_number == len(self.data_buffer)

        # Transpose each buffer's list of 7-tuples into 7 column lists and
        # concatenate the columns across all buffers, remembering every
        # buffer's length so load_data can split them apart again.
        columns = [[] for _ in range(7)]
        lengths = []
        for buffer in self.data_buffer:
            lengths.append(len(buffer))
            for col_idx, column in enumerate(zip(*buffer)):
                columns[col_idx].extend(column)

        torch.save({
            'states':                np.array(columns[0]),
            'mcts_probs':            np.array(columns[1]),
            'mcts_value_probs':      np.array(columns[2]),
            'future_reward_list':    np.array(columns[3]),
            'actual_move':           np.array(columns[4]),
            'priority_compare_list': np.array(columns[5]),
            'idx':                   np.array(columns[6]),
            'each_data_length':      np.array(lengths),
            'start_itera':           save_itera,
        }, data_file)
    
    
    def load_data(self, data_file):
        """Restore the per-matrix replay buffers and the iteration counter
        previously written by save_data."""
        checkpoint = torch.load(data_file)

        self.start_itera = checkpoint['start_itera']
        print('##start_itera: ', self.start_itera)

        # Re-zip the stored column arrays back into per-sample 7-tuples
        # (state, mcts_probs, mcts_value_probs, reward, move, priority, idx).
        samples = list(zip(checkpoint['states'],
                           checkpoint['mcts_probs'],
                           checkpoint['mcts_value_probs'],
                           checkpoint['future_reward_list'],
                           checkpoint['actual_move'],
                           checkpoint['priority_compare_list'],
                           checkpoint['idx']))

        # Split the flat sample list back into the per-matrix buffers using
        # the recorded per-buffer lengths.
        offset = 0
        for i in range(self.data_number):
            length = checkpoint['each_data_length'][i]
            self.data_buffer[i].extend(samples[offset: offset + length])
            offset += length
        

    def run(self):
        """run the training pipeline"""
                
        f = open(f'sample_data_200.txt')
        sparse_name_list = []
        for line_id, line in enumerate(f.readlines()):
            sparse_name = line.strip('\n')
            sparse_name_list.append(sparse_name)
        f.close

        sparse_name_list = [
                            sparse_name_list[0] , sparse_name_list[1], 
                            sparse_name_list[2] , sparse_name_list[3],
                            # sparse_name_list[4] , sparse_name_list[5], sparse_name_list[6] , sparse_name_list[7],
                            
                            sparse_name_list[50] , sparse_name_list[51], 
                            sparse_name_list[52] , sparse_name_list[53],
                            # sparse_name_list[54] , sparse_name_list[55], sparse_name_list[56] , sparse_name_list[57],
                            
                            sparse_name_list[100] , sparse_name_list[101], 
                            sparse_name_list[102] , sparse_name_list[103],
                            # sparse_name_list[104] , sparse_name_list[105], sparse_name_list[106] , sparse_name_list[107],
                            
                            sparse_name_list[150] , sparse_name_list[151], 
                            sparse_name_list[152] , sparse_name_list[153],
                            # sparse_name_list[154] , sparse_name_list[155], sparse_name_list[156] , sparse_name_list[157],
                            ]

        print("#matrix size: ", len(sparse_name_list))
        print(sparse_name_list)
        # input()

        # matrix_mean_gflops = pt.zeros(32, dtype = float)
        # matrix_select_time = pt.zeros(32, dtype = int)

        # matrix_best_configure = torch.tensor([[[3, 7, 1], #512, 256, 2
        #                                     [2, 1, 0]] for i in range(32)]) # sequence select
        # matrix_best_reward = torch.zeros(32, dtype = float)


        # index_array = np.array([
        #                         0, 1, 2, 3, 4, 5, 6, 7,
        #                         50, 51, 52, 53, 54, 55, 56, 57, 
        #                         100, 101, 102, 103, 104, 105, 106, 107,
        #                         150, 151, 152, 153, 154, 155, 156, 157,
        #                         ])
                
        
        
        # mean_gflops_list = [12.0, 26.90, 60.81,]
        # std_gflops_list = [3.28, 27.72, 37.76,]
        
        mean_gflops_list = [12.10, 11.40, 12.69, 12.82, 
                            21.85, 18.19, 27.96, 32.21, 
                            63.29, 34.66, 45.10, 34.87, 
                            70.82, 111.83, 95.46, 114.74]
        std_gflops_list = [3.19, 5.22, 2.91, 6.62, 
                           23.88, 35.88, 14.32, 11.81, 
                           39.30, 17.76, 13.18, 18.02, 
                           64.82, 67.37, 55.44, 56.52]
        
        

        # # index_array = np.array([x for x in range(200, 400)])

        # datafn = f'../data/200data_large_for_csv.hdf5'
        # print('#loading dataset', datafn, '...')
        # cache = h5py.File(datafn, 'r')
        # density_map_all = cache['density_map'][index_array]
        # print('#density_map:', density_map_all.shape, density_map_all.dtype)
        # manual_feature_all = cache['manual_feature'][index_array]
        # print('#manual_feature:', manual_feature_all.shape, manual_feature_all.dtype)
        # nnz_number_all = cache['nnz_number'][index_array]
        # nnz_distribution_all = cache['nnz_row_distribution'][index_array]
        # cache.close()

        ########------------------------------------------------------------------

        voxel_datafn = '../data/16_voxel.h5py'
        # voxel_datafn = '../data/4_voxel.h5py'
        print('#loading dataset', voxel_datafn, '...')
        cache = h5py.File(voxel_datafn, 'r')

        csr_store_length = cache['csr_store_length'][()]
        feature_buffer_all = cache['feature_buffer'][()]
        coordinate_buffer_all = cache['coordinate_buffer'][()]
        number_buffer_all = cache['number_buffer'][()]
        density_buffer_all = cache['density_buffer'][()]
        nnz_number_all = cache['nnz_number'][()]
        
        # density_map_all = cache['density_map'][()]
        # density_local_map_all = cache['density_local_map'][()]
        # density_map_row_all = cache['density_map_row'][()]
        # density_map_col_all = cache['density_map_col'][()]

        cache.close()
        
        
        trainset = Voxel_trainSet(csr_store_length, feature_buffer_all, coordinate_buffer_all, number_buffer_all, 
                                density_buffer_all, nnz_number_all, sparse_name_list, mean_gflops_list, std_gflops_list)
        trainloader = DataLoader(trainset, batch_size=self.threads_num, shuffle=False,  collate_fn=collate_fn)
        
        
        self.data_buffer_matrix = []
        for i in range(self.data_number):
            self.data_buffer_matrix.append(trainset[i])

        # for itera, batch_data in enumerate(trainloader):
        #     # ---------------
        #     # # t_record = time.perf_counter()
        #     # -------------------------
        #     # value_loss_supervised = 0
        #     # policy_loss_supervised = 0
        #     # mean_gflops_list = []
            
        #     for batch_itera in range(self.threads_num):
                
        #         #--------------------------------------------
        #         idx = batch_data['idx'][batch_itera]
        #         sparse_name = batch_data['sparse_name'][batch_itera]
        #         # density_map = batch_data['density_map'][batch_itera]
        #         # density_local_map =  batch_data['density_local_map'][batch_itera]
        #         nnz_number = batch_data['nnz_num'][batch_itera]
                
        #         feature_buffer = batch_data['feature_buffer'][batch_itera: batch_itera+1]
        #         coordinate_buffer = batch_data['coordinate_buffer'][batch_itera: batch_itera+1]
        #         density_buffer = batch_data['density_buffer'][batch_itera: batch_itera+1]
        
        
        #------------------    
        
        
        
        #-------------------
        
        try:    
            # for i in range(self.game_batch_num / len(trainloader)):
            for i in range(self.start_itera + 1, self.game_batch_num):
                #####
                for itera, batch_data_thread in enumerate(trainloader):              
                    
                    threads = []
                    for j in range(self.threads_num):
                        
                        t = threading.Thread(target=self.collect_selfplay_data_databuffer,args=(
                                                        batch_data_thread[j]['sparse_name'], batch_data_thread[j], 
                                                        batch_data_thread[j]['mean_gflops'], batch_data_thread[j]['std_gflops'],
                                                            # mean_gflops_list[ j+1 ], 
                                                            # std_gflops_list[ j+1 ],
                                                            batch_data_thread[j]['idx'],
                                                            j, self.play_batch_size))
                        
                        # t = threading.Thread(target=self.collect_selfplay_data_databuffer,args=(trainset[4 *  (j+1) ]['sparse_name'], trainset[4 * (j+1) ], 
                        #                                                             mean_gflops_list[ j+1 ], 
                        #                                                             std_gflops_list[ j+1 ],
                        #                                                             j, self.play_batch_size))
                        
                        t.start()
                        threads.append(t)
                    for t in threads:
                        t.join()
                    
                    threads = []
                        
                    # self.collect_selfplay_data_databuffer(trainset[8]['sparse_name'], trainset[8], 
                    #                            mean_gflops_list[2], std_gflops_list[2], 
                    #                            0, self.play_batch_size)
                    data_buffer_length = [len(x) for x in self.data_buffer]
                    data_buffer_length = sum(data_buffer_length)
                    print("batch i:{}, episode_len:{}, data_buffer:{}".format(i+1, self.episode_len, data_buffer_length))
                    # print("batch i:{}, episode_len:{}, data_buffer:{}".format(i+1, self.episode_len, len(self.data_buffer)))
                    # if len(self.data_buffer) > self.batch_size:
                    if data_buffer_length > self.batch_size * 3:
                        loss, entropy = self.policy_update_databuffer()
                        torch.cuda.empty_cache()
                    # # check the performance of the current model,
                    # # and save the model params
                    # if (i+1) % self.check_freq == 0:
                    #     print("current self-play batch: {}".format(i+1))
                    #     # win_ratio = self.policy_evaluate()
                    #     self.policy_value_net.save_model(f'./chk/current_policy_{str(i+1).zfill(3)}.model')
                    if ((i+1) % self.check_freq == 0 and itera == 0) or (itera+1) % 4 == 0:
                        print("current self-play batch: {}".format(i+1))
                        # win_ratio = self.policy_evaluate()
                        model_save_path = f'../chk/current_policy_{str(i+1).zfill(3)}_itera{str(itera+1).zfill(3)}_c{self.c_puct}_n{self.n_playout}_di08_buffer{self.buffer_size}_traineval.model'
                        self.policy_value_net.save_model(model_save_path)
                        data_save_path = f'../chk/current_policy_{str(i+1).zfill(3)}_itera{str(itera+1).zfill(3)}_c{self.c_puct}_n{self.n_playout}_di08_buffer{self.buffer_size}_traineval.data'
                        self.save_data(data_save_path, i)
                    #     if win_ratio > self.best_win_ratio:
                    #         print("New best policy!!!!!!!!")
                    #         self.best_win_ratio = win_ratio
                    #         # update the best_policy
                    #         self.policy_value_net.save_model('./best_policy.model')
                    #         if (self.best_win_ratio == 1.0 and
                    #                 self.pure_mcts_playout_num < 5000):
                    #             self.pure_mcts_playout_num += 1000
                    #             self.best_win_ratio = 0.0
        except KeyboardInterrupt:
            print('\n\rquit')


if __name__ == '__main__':

    # Pin all CUDA work to device 0 and make every RNG deterministic.
    torch.cuda.set_device(0)
    setup_seed(20230422)

    # Module-level lock shared by the self-play worker threads
    # (referenced as a global from the thread targets).
    threadLock = threading.Lock()

    # Resume from a previously saved policy checkpoint plus its replay buffer.
    checkpoint_model = '../chk/current_policy_025_itera004_c3_n200_di08_buffer50_traineval.model'
    checkpoint_buffer = '../chk/current_policy_025_itera004_c3_n200_di08_buffer50_traineval.data'

    pipeline = TrainPipeline(
        data_number=16,
        init_model=checkpoint_model,
        init_data=checkpoint_buffer,
    )
    pipeline.run()
