# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np

from typing import Set
from functools import partial
import spconv.pytorch as spconv


def set_learning_rate(optimizer, lr):
    """Overwrite the learning rate of every parameter group in *optimizer*."""
    for group in optimizer.param_groups:
        group['lr'] = lr


def find_all_spconv_keys(model: nn.Module, prefix="") -> Set[str]:
    """
    Recursively collect the state-dict keys of every spconv sparse-convolution
    weight in *model* (these weights need their layout transposed when loading
    checkpoints across spconv versions).

    Returns a set of dotted keys such as ``"conv2.0.weight"``.
    """
    found_keys: Set[str] = set()
    for name, child in model.named_children():
        # Dotted module path of this child, e.g. "conv1.0".
        child_prefix = f"{prefix}.{name}" if prefix != "" else name

        if isinstance(child, spconv.conv.SparseConvolution):
            found_keys.add(f"{child_prefix}.weight")

        # Bug fix: recurse with the module path only.  The original appended
        # ".weight" to the prefix before recursing, which would corrupt the
        # keys of anything nested below a SparseConvolution.
        found_keys.update(find_all_spconv_keys(child, prefix=child_prefix))

    return found_keys

def replace_feature(out, new_features):
    """Return *out* carrying *new_features*, supporting both spconv APIs.

    spconv 2.x sparse tensors expose a ``replace_feature`` method returning a
    new tensor; spconv 1.x tensors are mutated in place via ``features``.
    """
    if "replace_feature" in dir(out):
        # spconv 2.x behaviour
        return out.replace_feature(new_features)

    # spconv 1.x behaviour: mutate and hand back the same object.
    out.features = new_features
    return out

def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
                   conv_type='subm', norm_fn=None):
    """Build a sparse conv -> norm -> ReLU block.

    ``conv_type`` selects the spconv layer: ``'subm'`` (submanifold),
    ``'spconv'`` (regular, strided/padded) or ``'inverseconv'`` (inverse).
    Raises NotImplementedError for any other value.
    """
    if conv_type not in ('subm', 'spconv', 'inverseconv'):
        raise NotImplementedError

    if conv_type == 'subm':
        conv = spconv.SubMConv2d(in_channels, out_channels, kernel_size,
                                 bias=False, indice_key=indice_key)
    elif conv_type == 'spconv':
        conv = spconv.SparseConv2d(in_channels, out_channels, kernel_size,
                                   stride=stride, padding=padding,
                                   bias=False, indice_key=indice_key)
    else:  # 'inverseconv'
        conv = spconv.SparseInverseConv2d(in_channels, out_channels, kernel_size,
                                          indice_key=indice_key, bias=False)

    return spconv.SparseSequential(
        conv,
        norm_fn(out_channels),
        nn.ReLU(),
    )

def post_act_block_dense(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, norm_fn=None):
    """Dense counterpart of post_act_block: Conv2d -> norm -> ReLU."""
    conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride,
                     padding=padding, dilation=dilation, bias=False)
    return nn.Sequential(conv, norm_fn(out_channels), nn.ReLU())

class SparseBasicBlock(spconv.SparseModule):
    """Residual block of two submanifold sparse 3x3 convolutions.

    Mirrors the classic ResNet BasicBlock on spconv sparse tensors;
    norm/activation run on the dense feature matrix via replace_feature().
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None):
        super(SparseBasicBlock, self).__init__()

        assert norm_fn is not None
        # NOTE(review): always True once the assert passes; kept as-is so that
        # existing checkpoints (which contain conv biases) still load.
        use_bias = norm_fn is not None

        self.conv1 = spconv.SubMConv2d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=use_bias, indice_key=indice_key
        )
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        self.conv2 = spconv.SubMConv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=use_bias, indice_key=indice_key
        )
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main path: conv -> bn -> relu -> conv -> bn.
        out = self.conv1(x)
        out = replace_feature(out, self.relu(self.bn1(out.features)))

        out = self.conv2(out)
        out = replace_feature(out, self.bn2(out.features))

        # Shortcut path (optionally projected).
        shortcut = x if self.downsample is None else self.downsample(x)

        out = replace_feature(out, out.features + shortcut.features)
        return replace_feature(out, self.relu(out.features))
    

class BasicBlock(nn.Module):
    """Dense 2-D residual basic block: conv-BN-ReLU-conv-BN + skip, ReLU.

    Follows the torchvision BasicBlock layout.  ``downsample`` must be
    provided whenever ``stride != 1`` (or the channel count changes) so the
    identity branch matches the main branch spatially/channel-wise.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None):
        super(BasicBlock, self).__init__()

        assert norm_fn is not None
        # NOTE(review): always True once the assert passes; kept as-is so
        # existing checkpoints (which contain conv biases) still load.
        bias = norm_fn is not None
        self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=bias)
        self.bn1 = norm_fn(planes)
        self.relu = nn.ReLU()
        # Bug fix: the second conv keeps stride 1.  The original applied
        # `stride` to both convs, so for stride > 1 the main path downsampled
        # twice and the residual add failed.  Unchanged for stride == 1 (the
        # only usage in this file) and weight shapes are identical.
        self.conv2 = nn.Conv2d(planes, planes, 3, stride=1, padding=1, bias=bias)
        self.bn2 = norm_fn(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out = out + identity
        out = self.relu(out)

        return out
    
class LinearBlock(nn.Module):
    """Two-layer MLP block: (Linear -> LayerNorm -> GELU) twice."""

    def __init__(self, in_dims, out_dims):
        super(LinearBlock, self).__init__()

        self.Linear1 = nn.Linear(in_dims, out_dims)
        self.norm1 = nn.LayerNorm(out_dims)
        self.activate = nn.GELU()

        self.Linear2 = nn.Linear(out_dims, out_dims)
        self.norm2 = nn.LayerNorm(out_dims)

    def forward(self, x):
        # First projection with norm + activation.
        h = self.activate(self.norm1(self.Linear1(x)))
        # Second projection, same treatment.
        return self.activate(self.norm2(self.Linear2(h)))
    
class LinearResBlock(nn.Module):
    """Residual MLP block: GELU(x + MLP(x)); the feature width is preserved."""

    def __init__(self, in_dims):
        super(LinearResBlock, self).__init__()
        out_dims = in_dims

        self.Linear1 = nn.Linear(in_dims, out_dims)
        self.norm1 = nn.LayerNorm(out_dims)
        self.activate = nn.GELU()

        self.Linear2 = nn.Linear(out_dims, out_dims)
        self.norm2 = nn.LayerNorm(out_dims)

    def forward(self, x):
        # Two-layer MLP branch.
        h = self.activate(self.norm1(self.Linear1(x)))
        h = self.norm2(self.Linear2(h))
        # Skip connection, then the shared activation.
        return self.activate(h + x)
    
    
class VFELayer(nn.Module):
    """Voxel Feature Encoding layer (VoxelNet-style).

    Each voxel holds up to T padded point slots.  Points are embedded to
    ``units = out_channels // 2`` channels, max-pooled per voxel, and the
    pooled vector is concatenated back onto every point slot, producing
    ``out_channels`` features per slot.  Padding slots are zeroed via *mask*.
    """

    def __init__(self, in_channels, out_channels):
        super(VFELayer, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        # Half the output width; the other half is the broadcast pooled vector.
        self.units = int(out_channels / 2)

        self.dense = nn.Sequential(nn.Linear(self.in_channels, self.units), nn.ReLU())
        self.batch_norm = nn.BatchNorm1d(self.units)

    def forward(self, inputs, mask):
        """Encode one VFE stage.

        inputs: [ΣK, T, in_channels] padded point features per voxel.
        mask:   [ΣK, T, 1] bool, True where a real (non-padding) point sits.
        Returns [ΣK, T, 2 * units].
        """
        # [ΣK, T, in_ch] -> [ΣK, T, units] -> [ΣK, units, T]
        # (BatchNorm1d expects channels in dim 1).
        tmp = self.dense(inputs).transpose(1, 2)
        # [ΣK, units, T] -> [ΣK, T, units]
        pointwise = self.batch_norm(tmp).transpose(1, 2)

        # Voxel-wise max over the T point slots: [ΣK, 1, units]
        aggregated, _ = torch.max(pointwise, dim=1, keepdim=True)

        # Broadcast the pooled vector to every point slot: [ΣK, T, units].
        # Fix: use the actual slot count instead of the hard-coded 45 so the
        # layer works for any VOXEL_POINT_COUNT (e.g. 35 or 45).
        repeated = aggregated.expand(-1, pointwise.shape[1], -1)

        # [ΣK, T, 2 * units]
        concatenated = torch.cat([pointwise, repeated], dim=2)

        # [ΣK, T, 1] -> [ΣK, T, 2 * units]: zero out padding slots.
        mask = mask.expand(-1, -1, 2 * self.units)

        concatenated = concatenated * mask.float()

        return concatenated


class FeatureNet_small_part(nn.Module):
    """Two stacked VFE layers producing one 32-d feature per voxel.

    Inputs arrive as per-sample lists of padded point buffers; everything is
    moved to the GPU inside forward(), so CUDA is required.
    """

    def __init__(self):
        super(FeatureNet_small_part, self).__init__()

        # 4 raw point features -> 32 channels, then 32 -> 32.
        self.vfe1 = VFELayer(4, 32)
        self.vfe2 = VFELayer(32, 32)


    def forward(self, feature, coordinate):
        """Encode point buffers into voxel-wise features.

        feature: list of [K_i, T, C] tensors, one per sample.  C is
            presumably 4 since vfe1 expects in_channels=4 (an earlier
            comment said 7) — TODO confirm against the data loader.
        coordinate: list of [K_i, 4] tensors, each row (batch, d, h, w).
        Returns (voxelwise [ΣK, 32], coordinate [ΣK, 4]).
        """
        # batch_size = len(feature)
        feature = torch.cat(feature, dim = 0).cuda()   # [ΣK, T, C]; T = cfg.VOXEL_POINT_COUNT (35/45)
        coordinate = torch.cat(coordinate, dim = 0).cuda()     # [ΣK, 4]; each row stores (batch, d, h, w)

        # A point slot is real (non-padding) iff any of its features is non-zero.
        vmax, _ = torch.max(feature, dim = 2, keepdim = True)
        mask = (vmax != 0)  # [ΣK, T, 1]

        x = self.vfe1(feature, mask)
        x = self.vfe2(x, mask)

        # Max-pool over the T point slots -> one 32-d vector per voxel: [ΣK, 32].
        voxelwise, _ = torch.max(x, dim = 1)

        # Car: [B, 10, 400, 352, 128]; Pedestrain/Cyclist: [B, 10, 200, 240, 128]
        # outputs = torch.sparse.FloatTensor(coordinate.t(), voxelwise, torch.Size([batch_size, 256, 256, 128]))
        # outputs = outputs.to_dense()

        return voxelwise, coordinate # outputs


class Sparse_Res_Bone8x_small_part(nn.Module):
    """Sparse residual backbone with policy / value heads.

    Four sparse stages (strides 1, 2, 2, 2) are densified after conv4 and
    passed through one dense stage, for a 16x total spatial reduction over
    the 256x256 input grid.  The pooled backbone features are fused with two
    embedded auxiliary vectors (envs_feature, csv_feature) and fed to a
    3-way policy head, three 32-way policy-value heads and a scalar value
    head.
    """

    def __init__(self, ):  #model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        # self.model_cfg = model_cfg
        # BatchNorm1d: spconv stores active-site features as an [N, C] matrix.
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
        # (H, W) of the incoming SparseConvTensor grid.
        self.sparse_shape = [256, 256]
        
        block = post_act_block
        dense_block = post_act_block_dense
        
        # Stage 1: 256x256 grid, 32 channels (no downsampling).
        # 256 * 32
        self.conv1 = spconv.SparseSequential(
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res1'),
            SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res1'),
        )

        # Stage 2: stride-2 sparse conv then two residual blocks.
        # 128 * 64
        self.conv2 = spconv.SparseSequential(
            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res2'),
            SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res2'),
        )

        # 64 * 128
        self.conv3 = spconv.SparseSequential(
            block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'),
            SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'),
        )

        # 32 * 256
        self.conv4 = spconv.SparseSequential(
            block(128, 256, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv4', conv_type='spconv'),
            SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res4'),
            SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res4'),
        )
        
        # Dense stage: conv5 runs on dense NCHW maps, so switch to BatchNorm2d.
        # 16 * 256
        norm_fn = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.01)
        self.conv5 = nn.Sequential(
            dense_block(256, 256, 3, norm_fn=norm_fn, stride=2, padding=1),
            BasicBlock(256, 256, norm_fn=norm_fn),
            BasicBlock(256, 256, norm_fn=norm_fn),
        )

        # self.num_point_features = 256
        # Per-stage channel counts (kept for external consumers).
        self.backbone_channels = {
            'x_conv1': 32,
            'x_conv2': 64,
            'x_conv3': 128,
            'x_conv4': 256,
            'x_conv5': 256
        }
        
        # Pool conv5 output to 2x2 -> flattened 256*4 = 1024-d vector per sample.
        self.adapt_pool = nn.AdaptiveAvgPool2d((2, 2))
        
        # self.envs_feature_embedding = nn.Sequential(
        #                                 nn.Linear(6, 256),   nn.LayerNorm(256), nn.GELU(),
        #                                 # nn.Linear(62, 256),   nn.LayerNorm(256), nn.GELU(),  
        #                                 nn.Linear(256, 256),       nn.LayerNorm(256), nn.GELU(),  
        #                                 nn.Linear(256, 256),       nn.LayerNorm(256), nn.GELU(),  
        #                                 nn.Linear(256, 256),       nn.LayerNorm(256), nn.GELU(),  
                                        
        #                                 # nn.Linear(256, 1024),        nn.LayerNorm(1024), nn.GELU(),  
        #                                 # nn.Linear(1024, 1024),       nn.LayerNorm(1024), nn.GELU(),  
        #                                 # nn.Linear(1024, 1024),       nn.LayerNorm(1024), nn.GELU(),  
        #                                 )
        # 6-d environment vector -> 256-d embedding via MLP + residual blocks.
        self.envs_feature_embedding = nn.ModuleList([LinearBlock(6, 256)] + [LinearResBlock(256) for i in range(3)])
        
        # self.csv_feature_embedding = nn.Sequential(
        #                         nn.Linear(6, 256),   nn.LayerNorm(256), nn.GELU(),
        #                         # nn.Linear(62, 256),   nn.LayerNorm(256), nn.GELU(),  
        #                         nn.Linear(256, 256),       nn.LayerNorm(256), nn.GELU(),  
        #                         nn.Linear(256, 256),       nn.LayerNorm(256), nn.GELU(),  
        #                         nn.Linear(256, 256),       nn.LayerNorm(256), nn.GELU(),  
                                
        #                         # nn.Linear(256, 1024),        nn.LayerNorm(1024), nn.GELU(),  
        #                         # nn.Linear(1024, 1024),       nn.LayerNorm(1024), nn.GELU(),  
        #                         # nn.Linear(1024, 1024),       nn.LayerNorm(1024), nn.GELU(),  
        #                         )
        # 17-d CSV feature vector -> 256-d embedding.
        self.csv_feature_embedding = nn.ModuleList([LinearBlock(17, 256)] + [LinearResBlock(256) for i in range(3)])
        
        # self.embed_policy = nn.Sequential(
        #                                   nn.Linear(1024 + 256 + 256, 1024), nn.LayerNorm(1024), nn.GELU()
        #                                   )
        # Fused (backbone 1024 + envs 256 + csv 256) -> 1024 policy trunk.
        self.embed_policy = nn.ModuleList([LinearBlock(1024 + 256 + 256, 1024)] + [LinearResBlock(1024) for i in range(2)])
        
        # self.embed_value = nn.Sequential(
        #                                  nn.Linear(1024 + 256 + 256, 1024), nn.LayerNorm(1024), nn.GELU()
        #                                  )
        # Same fusion width for the value trunk.
        self.embed_value = nn.ModuleList([LinearBlock(1024 + 256 + 256, 1024)] + [LinearResBlock(1024) for i in range(2)])
        
        # Heads: 3-way action policy; one 32-way value-distribution head per action.
        self.policy_prob_head = nn.Sequential(nn.Linear(1024, 512),  nn.LayerNorm(512), nn.GELU(), nn.Linear(512, 3))
        self.policy_value_prob_head1 = nn.Sequential(nn.Linear(1024, 512), nn.LayerNorm(512), nn.GELU(), nn.Linear(512, 32))
        self.policy_value_prob_head2 = nn.Sequential(nn.Linear(1024, 512), nn.LayerNorm(512), nn.GELU(), nn.Linear(512, 32))
        self.policy_value_prob_head3 = nn.Sequential(nn.Linear(1024, 512), nn.LayerNorm(512), nn.GELU(), nn.Linear(512, 32))
        
        # Scalar state-value head.
        self.value_head = nn.Sequential(nn.Linear(1024, 512), nn.LayerNorm(512), nn.GELU(), nn.Linear(512, 1))


    def forward(self, features, coords, batch_size, envs_feature, csv_feature): # batch_dict):
        """Run backbone + heads.

        features: [N, 32] active-voxel features; coords: integer voxel
        indices with the batch index in column 0 (presumably [N, 3] rows of
        (batch, h, w) to match the 2-D sparse_shape — TODO confirm).
        Returns (log policy probs [B, 3],
                 (three log policy-value prob tensors, [B, 32] each),
                 value [B, 1]).
        """
        # pillar_features, pillar_coords = batch_dict['pillar_features'], batch_dict['pillar_coords']
        # batch_size = batch_dict['batch_size']
        
        input_sp_tensor = spconv.SparseConvTensor(
            features=features,
            indices=coords.int(),
            spatial_shape=self.sparse_shape,
            batch_size=batch_size
        )
        
        x_conv1 = self.conv1(input_sp_tensor)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        # Densify before the dense conv5 stage.
        x_conv4_dense = x_conv4.dense()
        x_conv5 = self.conv5(x_conv4_dense)
            
        # [B, 256, h, w] -> [B, 256, 2, 2] -> [B, 1024]
        xx = self.adapt_pool(x_conv5).contiguous().reshape(x_conv5.shape[0], -1)
        
        # envs_feature = self.envs_feature_embedding(envs_feature)
        # csv_feature = self.csv_feature_embedding(csv_feature)
        # Embed the two auxiliary vectors through their MLP stacks.
        for layer in self.envs_feature_embedding:
            envs_feature = layer(envs_feature)
        for layer in self.csv_feature_embedding:
            csv_feature = layer(csv_feature)
        
        # Fuse: [B, 256 + 256 + 1024]
        xx = torch.cat([csv_feature, envs_feature, xx], dim=1)
        # policy_embed = self.embed_policy(xx)
        # value_embed = self.embed_value(xx)
        
        policy_embed = xx
        for layer in self.embed_policy:
            policy_embed = layer(policy_embed)
        value_embed = xx
        for layer in self.embed_value:
            value_embed = layer(value_embed)
        
        # xx = self.encoder_layers(xx)
        
        # policy_embed = self.embed_policy(xx[:,0])
        # value_embed = self.embed_value(xx[:,0])
        
        # All policy outputs are log-probabilities (consumed with exp() by callers).
        policy_prob =   F.log_softmax(self.policy_prob_head(policy_embed), -1)
        policy_value1 = F.log_softmax(self.policy_value_prob_head1(policy_embed), -1)
        policy_value2 = F.log_softmax(self.policy_value_prob_head2(policy_embed), -1)
        policy_value3 = F.log_softmax(self.policy_value_prob_head3(policy_embed), -1)
        
        value   = self.value_head(value_embed)

        return policy_prob, (policy_value1, policy_value2, policy_value3), value

        # batch_dict.update({
        #     'encoded_spconv_tensor': out,
        #     'encoded_spconv_tensor_stride': 8
        # })
        # batch_dict.update({
        #     'multi_scale_2d_features': {
        #         'x_conv1': x_conv1,
        #         'x_conv2': x_conv2,
        #         'x_conv3': x_conv3,
        #         'x_conv4': x_conv4,
        #         'x_conv5': x_conv5,
        #     }
        # })
        # batch_dict.update({
        #     'multi_scale_2d_strides': {
        #         'x_conv1': 1,
        #         'x_conv2': 2,
        #         'x_conv3': 4,
        #         'x_conv4': 8,
        #         'x_conv5': 16,
        #     }
        # })
        
        # NOTE(review): unreachable — the method already returned above.
        # Left in place pending cleanup.
        return x_conv5


class Voxel_sparse_small_net(nn.Module):
    """End-to-end network: VFE point encoding followed by the sparse
    residual backbone with policy / value heads.  Requires CUDA.
    """

    def __init__(self):
        super(Voxel_sparse_small_net, self).__init__()

        # Point -> voxel feature extractor.
        self.feature = FeatureNet_small_part()
        # Sparse 2-D backbone + heads.
        self.conv_part = Sparse_Res_Bone8x_small_part()

    def forward(self, feature_buffer, coordinate_buffer, density_buffer, envs_feature, csv_data):
        """Encode voxels, then run the backbone; returns the backbone's
        (policy log-probs, policy-value log-probs, value) triple."""
        # One coordinate tensor per sample in the batch.
        batch_size = len(coordinate_buffer)

        # Per-voxel features plus batch-prefixed coordinates.
        voxel_wise, coords = self.feature(feature_buffer, coordinate_buffer)

        # Moved to the GPU for parity with the original pipeline; currently
        # unused downstream (the concat below is commented out upstream).
        density_buffer = torch.cat(density_buffer, dim = 0).cuda()

        return self.conv_part(voxel_wise, coords, batch_size, envs_feature, csv_data)
        
        

class PolicyValueNet_sparse():
    """policy-value network

    Wraps Voxel_sparse_small_net with an Adam optimizer and exposes:
      * policy_value     — batched inference on (sparse_state, envs) pairs,
      * policy_value_fn  — single-board inference for MCTS rollouts,
      * train_step       — one optimization step of the combined loss.
    Requires CUDA; L2 regularization is applied via Adam's weight_decay.
    """
    def __init__(self, board_width, board_height,
                 model_file=None, use_gpu=True):
        self.use_gpu = use_gpu
        self.board_width = board_width
        self.board_height = board_height
        self.l2_const = 5e-6  # coef of l2 penalty (passed as Adam weight_decay below)
        # the policy value net module
        # if self.use_gpu:
        self.policy_value_net = Voxel_sparse_small_net().cuda()
        # else:
            # self.policy_value_net = Net()
        self.optimizer = optim.Adam(self.policy_value_net.parameters(),
                                    weight_decay=self.l2_const)

        # Optionally warm-start from a saved state_dict.
        if model_file:
            net_params = torch.load(model_file)
            self.policy_value_net.load_state_dict(net_params)

    def policy_value(self, state_batch):
        """
        input: a batch of states, each a (sparse_matrix_dict, envs_feature)
               pair; the dict carries the voxelized buffers produced by the
               data pipeline ('feature_buffer', 'coordinate_buffer', ...).
        output: a batch of action probabilities and state values
        """
        # if self.use_gpu:
        # train network
        # NOTE(review): inference is run in train() mode here (BatchNorm uses
        # batch statistics) — presumably intentional for MCTS batching; confirm.
        self.policy_value_net.train()
        
        sparse_matrix_batch, envs_feature_batch = zip(*state_batch)
        
        idx = [ret['idx'] for ret in sparse_matrix_batch]
        # sparse_name = [ret['sparse_name'] for ret in sparse_matrix_batch]
        # nnz_num = [ret['nnz_num'] for ret in sparse_matrix_batch]
        
        # Gather the per-sample voxel buffers out of the state dicts.
        feature_buffer = [ret['feature_buffer'] for ret in sparse_matrix_batch]
        coordinate_buffer_tmp = [ret['coordinate_buffer'] for ret in sparse_matrix_batch]
        number_buffer = [ret['number_buffer'] for ret in sparse_matrix_batch]
        density_buffer = [ret['density_buffer'] for ret in sparse_matrix_batch]
        csv_data = [ret['csv_data'] for ret in sparse_matrix_batch]
        
        batch_size = len(idx)
        # Prepend the sample index as column 0 of each coordinate row so the
        # network can tell which sample a voxel belongs to.
        coordinate_list = []
        for i, voxel_dict in zip(range(batch_size), coordinate_buffer_tmp):
            coordinate = voxel_dict        # (K, 2)
            coordinate_list.append(np.pad(coordinate, ((0, 0), (1, 0)), mode = 'constant', constant_values = i))
        
        feature_buffer = [torch.from_numpy(x) for x in feature_buffer]
        coordinate_buffer = [torch.from_numpy(x) for x in coordinate_list]
        number_buffer = [torch.from_numpy(x) for x in number_buffer]
        density_buffer = [torch.from_numpy(x) for x in density_buffer]
        
        # sparse_matrix_batch = np.array(sparse_matrix_batch)
        
        csv_data = np.array(csv_data)
        csv_data = Variable(torch.FloatTensor(csv_data).cuda())
        
        envs_feature_batch = np.array(envs_feature_batch)
        envs_feature_batch = Variable(torch.FloatTensor(envs_feature_batch).cuda())
        
        # sparse_matrix_batch = Variable(torch.FloatTensor(sparse_matrix_batch).cuda())
        # sparse_matrix_batch = torch.unsqueeze(sparse_matrix_batch, dim=1)
        # manual_feature_batch = Variable(torch.FloatTensor(manual_feature_batch).cuda())
        
        # state_batch = Variable(torch.FloatTensor(state_batch).cuda())
        # log_act_probs, value = self.policy_value_net(state_batch)  
        # act_probs = np.exp(log_act_probs.data.cpu().numpy())      
        # print(sparse_matrix_batch.shape)
        # print(manual_feature_batch.shape)
        # input()
        
        # log_policy_prob, (log_policy_value1, log_policy_value2, log_policy_value3), value = self.policy_value_net(state_batch)
        # log_policy_prob, (log_policy_value1, log_policy_value2, log_policy_value3), value = self.policy_value_net(sparse_matrix_batch, manual_feature_batch)
        log_policy_prob, (log_policy_value1, log_policy_value2, log_policy_value3), value = self.policy_value_net(feature_buffer, coordinate_buffer, density_buffer, envs_feature_batch, csv_data)
        
        # Network emits log-probabilities; exponentiate back to probabilities.
        policy_prob = np.exp(log_policy_prob.data.cpu().numpy()) 
        policy_value1 = np.exp(log_policy_value1.data.cpu().numpy())
        policy_value2 = np.exp(log_policy_value2.data.cpu().numpy())
        policy_value3 = np.exp(log_policy_value3.data.cpu().numpy())
        torch.cuda.empty_cache()
        
        # policy_value = torch.stack((policy_value1, policy_value2, policy_value3), dim=1)
        
        # return policy_prob, policy_value, value.data.cpu().numpy()
        return policy_prob, [policy_value1, policy_value2, policy_value3], value.data.cpu().numpy()

        # else:
        #     state_batch = Variable(torch.FloatTensor(state_batch))
        #     log_act_probs, value = self.policy_value_net(state_batch)
        #     act_probs = np.exp(log_act_probs.data.numpy())
        #     return act_probs, value.data.numpy()

    def policy_value_fn(self, board):
        """
        input: board
        output: a list of (action, probability) tuples for each available
        action and the score of the board state
        (each tuple also carries that action's 32-way value distribution)
        """
        # eval network
        self.policy_value_net.eval()
        
        legal_positions = board.availables
        
        # current_state = np.ascontiguousarray(board.current_state().reshape(
        #         -1, 4, self.board_width, self.board_height))
        sparse_matrix_batch, envs_feature_batch = board.current_state()
        # Add a leading batch dimension of 1 for single-board inference.
        envs_feature_batch = np.expand_dims(envs_feature_batch, axis=0)
        
        # idx = sparse_matrix_batch['idx']
        # sparse_name = sparse_matrix_batch['sparse_name']
        # density_map = batch_data['density_map'][batch_itera]
        # density_local_map =  batch_data['density_local_map'][batch_itera]
        # nnz_number = sparse_matrix_batch['nnz_num']
        
        feature_buffer = sparse_matrix_batch['feature_buffer']
        density_buffer = sparse_matrix_batch['density_buffer']
        coordinate_buffer = sparse_matrix_batch['coordinate_buffer']
        csv_data = sparse_matrix_batch['csv_data']
        csv_data = np.expand_dims(csv_data, axis=0)
        
        # feature_buffer = np.expand_dims(feature_buffer, axis=0)
        # density_buffer = np.expand_dims(density_buffer, axis=0)
        
        # Batch index column is always 0 for a single board.
        coordinate_buffer = np.pad(coordinate_buffer, ((0, 0), (1, 0)), mode = 'constant', constant_values = 0)
        # coordinate_buffer = np.expand_dims(coordinate_buffer, axis=0)
        
        # The network expects per-sample lists of tensors.
        feature_buffer = [torch.from_numpy(feature_buffer)]
        density_buffer = [torch.from_numpy(density_buffer)]
        coordinate_buffer = [torch.from_numpy(coordinate_buffer)]
        
        # sparse_matrix_batch = np.expand_dims(np.expand_dims(sparse_matrix_batch, axis=0), axis=0)
        # manual_feature_batch = np.expand_dims(manual_feature_batch, axis=0)
        
        # sparse_matrix_batch = Variable(torch.FloatTensor(sparse_matrix_batch).cuda())
        envs_feature_batch = Variable(torch.FloatTensor(envs_feature_batch).cuda())
        csv_data = Variable(torch.FloatTensor(csv_data).cuda())
        
        
        # log_policy_prob, (log_policy_value1, log_policy_value2, log_policy_value3), value = self.policy_value_net(current_state)
        # log_policy_prob, (log_policy_value1, log_policy_value2, log_policy_value3), value = self.policy_value_net(sparse_matrix_batch, manual_feature_batch)
        log_policy_prob, (log_policy_value1, log_policy_value2, log_policy_value3), value = self.policy_value_net(feature_buffer, 
                                                                                                                  coordinate_buffer, density_buffer, 
                                                                                                                  envs_feature_batch, csv_data)
        
        # Flatten the batch-of-1 outputs and convert log-probs to probs.
        policy_prob = np.exp(log_policy_prob.data.cpu().numpy().flatten()) 
        policy_value1 = np.exp(log_policy_value1.data.cpu().numpy().flatten())
        policy_value2 = np.exp(log_policy_value2.data.cpu().numpy().flatten())
        policy_value3 = np.exp(log_policy_value3.data.cpu().numpy().flatten())
        policy_value = [policy_value1, policy_value2, policy_value3]
        # One 32-way value distribution per action: rows indexable by action id.
        policy_value = np.stack(policy_value, axis = 0)
        
        # if self.use_gpu:
        # log_act_probs, value = self.policy_value_net(Variable(torch.from_numpy(current_state)).cuda().float())
        # act_probs = np.exp(log_act_probs.data.cpu().numpy().flatten())
        # else:
        #     log_act_probs, value = self.policy_value_net(
        #             Variable(torch.from_numpy(current_state)).float())
        #     act_probs = np.exp(log_act_probs.data.numpy().flatten())
        
        act_probs = zip(legal_positions, policy_prob[legal_positions], policy_value[legal_positions])
        value = value.data[0][0]
        
        torch.cuda.empty_cache()
        
        return act_probs, value

    def train_step(self, state_batch, mcts_probs, mcts_value_probs, winner_batch, move_batch, lr):
        """perform a training step

        Loss = (z - v)^2  -  pi^T log(p)  -  pi_v^T log(p_v[selected move]),
        with L2 regularization handled by the optimizer's weight_decay.
        Returns (value_loss, policy_loss, policy_value_loss, entropy), all
        as Python floats; entropy is for monitoring only.
        """
        
        # wrap in Variable
        # if self.use_gpu:
        
        # sparse_matrix_batch, manual_feature_batch = zip(*state_batch)
        
        # sparse_matrix_batch = np.array(sparse_matrix_batch)
        # manual_feature_batch = np.array(manual_feature_batch)
        
        # sparse_matrix_batch = Variable(torch.FloatTensor(sparse_matrix_batch).cuda())
        # sparse_matrix_batch = torch.unsqueeze(sparse_matrix_batch, dim=1)
        # manual_feature_batch = Variable(torch.FloatTensor(manual_feature_batch).cuda())
        
        # state_batch = Variable(torch.FloatTensor(state_batch).cuda())
        
        #---------------------
        # Same unpacking/conversion as policy_value() above.
        self.policy_value_net.train()
        
        sparse_matrix_batch, envs_feature_batch = zip(*state_batch)
        
        idx = [ret['idx'] for ret in sparse_matrix_batch]
        # sparse_name = [ret['sparse_name'] for ret in sparse_matrix_batch]
        # nnz_num = [ret['nnz_num'] for ret in sparse_matrix_batch]
        
        feature_buffer = [ret['feature_buffer'] for ret in sparse_matrix_batch]
        coordinate_buffer_tmp = [ret['coordinate_buffer'] for ret in sparse_matrix_batch]
        number_buffer = [ret['number_buffer'] for ret in sparse_matrix_batch]
        density_buffer = [ret['density_buffer'] for ret in sparse_matrix_batch]
        csv_data = [ret['csv_data'] for ret in sparse_matrix_batch]
        
        batch_size = len(idx)
        # Prepend the sample index as column 0 of each coordinate row.
        coordinate_list = []
        for i, voxel_dict in zip(range(batch_size), coordinate_buffer_tmp):
            coordinate = voxel_dict        # (K, 2)
            coordinate_list.append(np.pad(coordinate, ((0, 0), (1, 0)), mode = 'constant', constant_values = i))
        
        feature_buffer = [torch.from_numpy(x) for x in feature_buffer]
        coordinate_buffer = [torch.from_numpy(x) for x in coordinate_list]
        number_buffer = [torch.from_numpy(x) for x in number_buffer]
        density_buffer = [torch.from_numpy(x) for x in density_buffer]
      
        # sparse_matrix_batch = np.array(sparse_matrix_batch)
      
        csv_data = np.array(csv_data)
        csv_data = Variable(torch.FloatTensor(csv_data).cuda())

        envs_feature_batch = np.array(envs_feature_batch)
        envs_feature_batch = Variable(torch.FloatTensor(envs_feature_batch).cuda())
        
        #---------------------
        
        # Training targets from MCTS self-play.
        mcts_probs = np.array(mcts_probs)
        mcts_value_probs = np.array(mcts_value_probs)
        winner_batch = np.array(winner_batch)
        
        mcts_probs = Variable(torch.FloatTensor(mcts_probs).cuda())
        mcts_value_probs = Variable(torch.FloatTensor(mcts_value_probs).cuda())
        winner_batch = Variable(torch.FloatTensor(winner_batch).cuda())
        # else:
        #     state_batch = Variable(torch.FloatTensor(state_batch))
        #     mcts_probs = Variable(torch.FloatTensor(mcts_probs))
        #     winner_batch = Variable(torch.FloatTensor(winner_batch))

        # zero the parameter gradients
        self.optimizer.zero_grad()
        # set learning rate
        set_learning_rate(self.optimizer, lr)

        # forward
        log_act_probs, log_act_value_probs, value = self.policy_value_net(feature_buffer, coordinate_buffer, density_buffer, envs_feature_batch, csv_data)
        
        # define the loss = (z - v)^2 - pi^T * log(p) + c||theta||^2
        # Note: the L2 penalty is incorporated in optimizer
        value_loss = F.mse_loss(value.view(-1), winner_batch.view(-1))
        policy_loss = - torch.mean(torch.sum(mcts_probs*log_act_probs, 1))
        
        # policy_selection = torch.argmax(log_act_probs, dim=-1)
        # policy_selection = torch.argmax(mcts_probs, dim=-1)
        # Pick, per sample, the value-distribution head matching the move
        # actually played: flatten [B, 3, 32] -> [B*3, 32] and index row
        # b*3 + move_b.
        policy_selection = np.array(move_batch)
        batch_accumulate_base = np.arange(policy_selection.shape[0]) * 3
        policy_selection_index = batch_accumulate_base + policy_selection
        
        log_act_value_probs = torch.stack(log_act_value_probs, dim=1)
        log_act_value_probs = log_act_value_probs.reshape(-1, log_act_value_probs.shape[-1])
        
        policy_value_loss = - torch.mean(torch.sum(mcts_value_probs*log_act_value_probs[policy_selection_index], 1))
        
        
        loss = value_loss + policy_loss + policy_value_loss
        # backward and optimize
        loss.backward()
        self.optimizer.step()
        # calc policy entropy, for monitoring only
        entropy = - torch.mean(
                torch.sum(torch.exp(log_act_probs) * log_act_probs, 1)
                )
        torch.cuda.empty_cache()
        # return loss.data[0], entropy.data[0]
        #for pytorch version >= 0.5 please use the following line instead.
        # return loss.item(), entropy.item()
        return value_loss.item(), policy_loss.item(), policy_value_loss.item(), entropy.item()

    def get_policy_param(self):
        """Return the underlying network's state_dict."""
        net_params = self.policy_value_net.state_dict()
        return net_params


    def save_model(self, model_file):
        """ save model params to file """
        net_params = self.get_policy_param()  # get model params
        torch.save(net_params, model_file)
        