import numpy as np
import random
# from stable_baselines3 import PPO,A2C
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

from .utils import vaild_action_filter,ACTIONS


# Run on the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')

class CNNEncoder(nn.Module):
    """Two-stage convolutional encoder that extracts per-cell features from a
    grid observation: the feature vector at the agent's own cell, optionally
    concatenated with the feature vector at a target cell."""

    def __init__(self, features_size, hidden_size) -> None:
        super(CNNEncoder, self).__init__()
        # Short-range view: plain 3x3 conv, padding keeps H/W (receptive field 3x3).
        self.cnn_layer1_3x3 = nn.Conv2d(features_size, hidden_size, kernel_size=3, padding=1)
        self.batch_normal1 = nn.BatchNorm2d(hidden_size)
        # Mid-range view: 3x3 dilated conv (effective 5x5), padding=2 keeps H/W.
        self.cnn_layer2_3x3 = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=2, dilation=2)
        self.batch_normal2 = nn.BatchNorm2d(hidden_size)
        self.activate = nn.ReLU()

    def forward(self, observation, target=None):
        """
        Extract features at the agent's own cell (and optionally a target cell).

        observation: FloatTensor [B, C, H, W]; the last channel is assumed to
            contain exactly one cell equal to 1 per sample, marking the agent's
            position -- TODO confirm against the observation builder.
        target: optional LongTensor [B, >=3] whose columns 1 and 2 hold the
            target's (x, y) grid coordinates.
        return: [B, hidden_size] when target is None,
                [B, 2*hidden_size] otherwise (self || target features).
        """
        # Locate the agent in the last channel. A comparison result carries no
        # gradient, so the original detach()/cpu() round-trip is unnecessary;
        # torch.where already yields index tensors on the observation's device.
        _, self_x, self_y = torch.where(observation[:, -1] == 1)
        batch_size = observation.size(0)
        batch_index = torch.arange(batch_size, device=observation.device)

        # Short-range features (receptive field 3x3).
        low_level_features = self.cnn_layer1_3x3(observation)
        low_level_features = self.batch_normal1(low_level_features)
        low_level_features = self.activate(low_level_features)

        # Mid-range features: dilated conv plus a residual connection
        # (receptive field roughly 7x7 after both layers).
        mid_level_features = self.cnn_layer2_3x3(low_level_features)
        mid_level_features = self.batch_normal2(mid_level_features)
        mid_level_features = self.activate(low_level_features + mid_level_features)

        # Feature vector at the agent's own cell.
        self_mid_level_features = mid_level_features[batch_index, :, self_x, self_y]

        if target is not None:
            target_mid_level_features = mid_level_features[batch_index, :, target[:, 1], target[:, 2]]
            return torch.cat((self_mid_level_features, target_mid_level_features), dim=1)
        return self_mid_level_features

class LowQNet(nn.Module):
    """Low-level Q-network: maps (observation, sub-task descriptor) to
    Q-values over the 6 primitive actions.

    Target code layout: [task type, task x, task y, distance to task,
    can-bomb-target flag, next action towards target].
    """

    def __init__(self) -> None:
        super(LowQNet, self).__init__()
        self.low_encoder = CNNEncoder(12, 64)
        # Embeddings for the discrete fields of the sub-task descriptor.
        self.task_type = nn.Embedding(3, 32)
        self.task_distance = nn.Embedding(50, 32)
        self.task_can_bomb_target = nn.Embedding(2, 32)
        self.task_next_action = nn.Embedding(5, 32)

        # 64 (map features) + 4 * 32 (task embeddings) = 192 inputs.
        self.action_mlp = nn.Sequential(
            nn.Linear(192, 512),
            nn.ReLU(),
            nn.Linear(512, 6),
        )

    def forward(self, observation: torch.Tensor, target: torch.Tensor):
        """Return Q-values [B, 6] given observation [B, 12, H, W] and a
        LongTensor sub-task descriptor target [B, 6]."""
        map_embed = self.low_encoder(observation)
        fused = torch.cat(
            (
                map_embed,
                self.task_type(target[:, 0]),
                self.task_distance(target[:, 3]),
                self.task_can_bomb_target(target[:, 4]),
                self.task_next_action(target[:, 5]),
            ),
            dim=1,
        )
        return self.action_mlp(fused)

class HighQNet(nn.Module):
    """High-level Q-network: scores candidate sub-tasks (targets), producing
    one scalar value per target.

    Target code layout: [task type, task x, task y, distance to task].
    """

    def __init__(self) -> None:
        super(HighQNet, self).__init__()
        self.high_encoder = CNNEncoder(12, 64)
        # Embeddings for the discrete fields of the target descriptor.
        self.task_type = nn.Embedding(3, 32)
        self.task_distance = nn.Embedding(50, 32)

        # 128 (self + target map features) + 2 * 32 (embeddings) = 192 inputs.
        self.action_mlp = nn.Sequential(
            nn.Linear(192, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )

    def forward(self, observation: torch.Tensor, targets: torch.Tensor):
        """Return values [B] given observation [B or 1, 12, H, W] and a
        LongTensor targets [B, >=4].

        A single observation is repeated across all candidate targets when
        the batch sizes differ.
        """
        obs_batch, tgt_batch = observation.size(0), targets.size(0)
        if obs_batch == 1 and obs_batch != tgt_batch:
            observation = observation.repeat(tgt_batch, 1, 1, 1)
        map_embed = self.high_encoder(observation, targets)  # [B, 128]
        fused = torch.cat(
            (map_embed, self.task_type(targets[:, 0]), self.task_distance(targets[:, 3])),
            dim=1,
        )
        return self.action_mlp(fused).squeeze(1)  # [B]

class HierarchicalDQN(object):
    """Hierarchical DQN agent.

    A high-level network (HighQNet) scores candidate sub-tasks (targets) and
    a low-level network (LowQNet) picks primitive actions to accomplish the
    chosen sub-task. Both levels are trained with standard DQN targets using
    separate, periodically-synced target networks.
    """

    def __init__(self) -> None:
        # Effective discount per macro step (0.9 compounded over 5 primitive steps).
        self.high_gamma = math.pow(0.9, 5)  # discount factor, high level
        self.low_gamma = math.pow(0.9, 5)   # discount factor, low level
        self.eps = 0.2          # epsilon for eps-greedy exploration (currently unused)
        self.lr = 1e-3
        self.target_update = 2  # sync target networks every N training calls
        self.train = False      # True enables stochastic (exploratory) action selection

        # Low-level network, its frozen target copy, and optimizer.
        self.low_count = 1
        self.low_q_net = LowQNet().to(device)
        self.target_low_q_net = LowQNet().to(device)
        self.target_low_q_net.load_state_dict(self.low_q_net.state_dict())
        self.low_optimizer = torch.optim.Adam(self.low_q_net.parameters(), lr=self.lr)

        # High-level network, its frozen target copy, and optimizer.
        self.high_count = 1
        self.high_q_net = HighQNet().to(device)
        self.target_high_q_net = HighQNet().to(device)
        self.target_high_q_net.load_state_dict(self.high_q_net.state_dict())
        self.high_optimizer = torch.optim.Adam(self.high_q_net.parameters(), lr=self.lr)

    def _prepare_action_input(self, state, all_next_targets: list = None):
        """Build network inputs for action selection.

        all_next_targets is None for the low level, a list of candidate
        targets for the high level.
        return:
            observation: FloatTensor [1, C, H, W]
            target: LongTensor [1, 6] (low level) or targets [T, 4] (high level)
        """
        # from_numpy avoids the slow list-of-arrays path of FloatTensor([...]).
        observation = torch.from_numpy(np.asarray(state['observation'])).float().unsqueeze(0).to(device)
        if all_next_targets is None:
            # Low level: a single encoded sub-task descriptor.
            target = torch.tensor([state['target']['target_code']]).to(device)
            return observation, target
        # High level: one encoded descriptor per candidate target.
        targets = torch.tensor([t['target_high_code'] for t in all_next_targets]).to(device)
        return observation, targets

    def _prepare_train_input(self, batch_data):
        """Move a tuple of numpy batches onto the training device.

        float64 arrays are downcast to float32; bool arrays (e.g. `done`) are
        converted to float so expressions like `1 - done` work.
        """
        output = tuple()
        for data in batch_data:
            data = torch.tensor(data).to(device)
            if data.dtype in (torch.float64, torch.bool):
                data = data.float()
            output = output + (data,)
        return output

    @torch.no_grad()
    def action_low(self, state) -> str:
        """Return the primitive action (string) for the current sub-task.

        state: {
            'observation': np.array(12, 17, 17)
            'game_state': {'round', 'step', 'field', 'self', 'others',
                           'bombs', 'coins', 'user_input', 'explosion_map'}
            'target': {'target_name', 'target_position', 'cur_step',
                       'max_step', 'can_bomb_target',
                       'target_direction': ['RIGHT', ...], 'target_code': [...]}
        }

        NOTE(review): the learned low-level policy is currently bypassed; the
        agent deterministically follows the precomputed direction list toward
        the target and waits when no direction is available.
        """
        directions = state['target']['target_direction']
        return 'WAIT' if len(directions) == 0 else directions[0]

    @torch.no_grad()
    def action_high(self, state, all_next_targets):
        """Select the next sub-task among the candidates.

        state: {'observation': np.array(12,17,17), 'game_state': {...}}
        all_next_targets: [
            {'target_name': 'eat_coin', 'target_position': (9, 13),
             'target_high_code': [0, 9, 13, 10]},
            ...
        ]
        return: dict with the chosen target's name/position and a step budget.
        """
        self.high_q_net.eval()
        observation, targets = self._prepare_action_input(state, all_next_targets)
        values = self.high_q_net(observation, targets)  # [target_nums]
        probs = torch.softmax(values, dim=0)

        if self.train:
            # Sample proportionally to the softmaxed values while training.
            target_id = np.random.choice(list(range(len(all_next_targets))), p=probs.cpu().numpy())
        else:
            target_id = np.argmax(probs.cpu().numpy())
        chosen = all_next_targets[target_id]
        self_x, self_y = state['game_state']['self'][3]
        target_x, target_y = chosen['target_position']
        # Manhattan distance lower-bounds the steps needed; 100 is generous slack.
        distance = abs(self_x - target_x) + abs(self_y - target_y)
        return {
            'target_name': chosen['target_name'],
            'target_position': chosen['target_position'],
            'cur_step': 0,
            'max_step': distance + 100,
        }

    def train_low(self, batch_data):
        """One DQN update of the low-level network.

        batch_data = (observation, action, target, reward,
                      next_observation, next_target, done)
        observation/next_observation: np.array [B, C, H, W]
        action, reward, done: np.array [B]
        target/next_target: np.array [B, 6]
            (fields: task type, x, y, distance, can-bomb flag, next action)
        """
        self.low_q_net.train()
        self.target_low_q_net.eval()
        batch_data = self._prepare_train_input(batch_data)
        observation, action, target, reward, next_observation, next_target, done = batch_data

        action = action.unsqueeze(1)
        q_values = self.low_q_net(observation, target).gather(1, action).squeeze(1)  # [B]
        # Bootstrap target from the frozen network; no graph needed here.
        with torch.no_grad():
            max_next_q_values = self.target_low_q_net(next_observation, next_target).max(1)[0].view(-1)  # [B]
        q_targets = reward + self.low_gamma * max_next_q_values * (1 - done)
        dqn_loss = F.mse_loss(q_values, q_targets)  # already mean-reduced over the batch
        self.low_optimizer.zero_grad()
        dqn_loss.backward()
        nn.utils.clip_grad_norm_(self.low_q_net.parameters(), 3)
        self.low_optimizer.step()
        if self.low_count % self.target_update == 0:
            self.target_low_q_net.load_state_dict(self.low_q_net.state_dict())
        self.low_count += 1

    def train_high(self, batch_data):
        """One DQN update of the high-level network.

        batch_data = (observation, target, reward,
                      next_observation, next_target, done)
        observation/next_observation: np.array [B, C, H, W]
        target/next_target: np.array [B, 4]
            (fields: task type, x, y, distance)
        reward, done: np.array [B]
        """
        self.high_q_net.train()
        self.target_high_q_net.eval()
        batch_data = self._prepare_train_input(batch_data)
        observation, target, reward, next_observation, next_target, done = batch_data

        q_values = self.high_q_net(observation, target)  # [B]
        # Value of the next chosen sub-task from the frozen target network.
        with torch.no_grad():
            next_q_values = self.target_high_q_net(next_observation, next_target)  # [B]
        q_targets = reward + self.high_gamma * next_q_values * (1 - done)
        dqn_loss = F.mse_loss(q_values, q_targets)  # already mean-reduced over the batch
        self.high_optimizer.zero_grad()
        dqn_loss.backward()
        nn.utils.clip_grad_norm_(self.high_q_net.parameters(), 3)
        self.high_optimizer.step()
        if self.high_count % self.target_update == 0:
            self.target_high_q_net.load_state_dict(self.high_q_net.state_dict())
        self.high_count += 1

    def save_model(self, model_path):
        """Save both online networks' weights (moved to CPU) to model_path."""
        low_state_dict = {key: value.data.cpu() for key, value in self.low_q_net.state_dict().items()}
        high_state_dict = {key: value.data.cpu() for key, value in self.high_q_net.state_dict().items()}
        torch.save(
            {'low_state_dict': low_state_dict, 'high_state_dict': high_state_dict},
            model_path,
        )

    def load_model(self, model_path):
        """Load weights saved by save_model into the online networks.

        map_location=device lets a checkpoint saved on GPU load on a CPU-only
        machine (and vice versa). The target networks are re-synced so that
        resumed training does not bootstrap from stale random weights.
        """
        state_dict = torch.load(model_path, map_location=device)
        self.low_q_net.load_state_dict(state_dict['low_state_dict'])
        self.high_q_net.load_state_dict(state_dict['high_state_dict'])
        self.target_low_q_net.load_state_dict(self.low_q_net.state_dict())
        self.target_high_q_net.load_state_dict(self.high_q_net.state_dict())

    def switch_train_mode(self):
        """Enable exploratory (stochastic) action selection."""
        self.train = True

    def switch_eval_mode(self):
        """Disable exploration; act greedily."""
        self.train = False

