
from collections import deque
import random
import numpy as np

from .utils import ACTIONS,get_bomb_map,vaild_action_filter
import events as e
from .utils import can_reach,search_target_path
import copy

class ReplayMemory:
    """Hierarchical replay buffer for a two-level (high/low) agent.

    The high-level policy chooses a *target* (eat a coin, bomb a crate, or
    bomb another agent); the low-level policy chooses the concrete
    movement/bomb actions that pursue that target.  Transitions for both
    levels are stored in separate bounded deques and sampled independently
    as n-step returns.
    """

    def __init__(self, capacity, gamma = 0.9, steps = 5, stack_frames = 1):
        """
        capacity: maximum number of transitions kept per memory
        gamma: discount factor for the n-step return
        steps: number of future frames folded into the n-step return
        stack_frames: number of frames stacked along the channel axis for
            one network input
        """
        self.round_num = 0                         # number of finished rounds
        self.low_memory = deque(maxlen=capacity)   # low-level transitions
        self.high_memory = deque(maxlen=capacity)  # high-level transitions
        self.start_high = True     # True -> pick a fresh high-level target next step
        self.steps = steps
        self.gamma = gamma
        self.short_gamma = 0.0     # discount for dense shaping rewards (0 = only the immediate one)
        self.stack_frames = stack_frames
        self.next_bomb_time = 0    # countdown until our last placed bomb has resolved

    def action(self, state, model):
        """Choose the next action and push ``state`` into the low-level buffer.

        When a new high-level decision is due, also asks ``model`` for a
        fresh target and snapshots the state into the high-level buffer;
        otherwise the previous step's target is carried over (deep-copied so
        per-step mutation does not leak backwards).
        """
        if self.start_high:
            state['target'] = model.action_high(state, self.get_all_next_targets(state['game_state']))
            self.high_memory.append(copy.deepcopy(state))
            self.start_high = False
        else:
            state['target'] = copy.deepcopy(self.low_memory[-1]['target'])

        # Refresh target bookkeeping (position, path, bombability, encoding).
        # This call is relatively expensive; avoid extra invocations.
        self._update_target(state)
        state['action'] = model.action_low(state)
        if self.next_bomb_time > 0:
            self.next_bomb_time -= 1

        action_ids = vaild_action_filter(state['game_state'])
        actions = [ACTIONS[action_id] for action_id in action_ids]

        # Start the bomb countdown only if the bomb is actually placeable.
        if state['action'] == 'BOMB' and 'BOMB' in actions:
            self.next_bomb_time = 6

        self.low_memory.append(state)
        return state['action']

    def push(self, new_game_state, events=None):
        """Attach rewards and done flags to the most recent transition.

        previous_state -> action -> events -> reward -> done(?) -> new_game_state
        ``new_game_state is None`` means the round has ended; in that case
        only the externally supplied ``events`` are used.
        """
        if len(self.low_memory) == 0:
            return
        previous_state = self.low_memory[-1]
        self.round_num += int(new_game_state is None)
        if new_game_state is None:
            self.next_bomb_time = -1
        if events is None:
            events = []

        if new_game_state is not None:
            all_events = self.get_all_events(previous_state['game_state'],
                                             previous_state['target'],
                                             previous_state['action'],
                                             new_game_state, events)
        else:
            all_events = events

        high_reward, low_reward, short_reward = self.get_rewards(all_events, previous_state['target'])
        previous_state['high_reward'] = high_reward
        previous_state['reward'] = low_reward
        previous_state['short_reward'] = short_reward
        # A non-zero high-level reward means the current sub-goal terminated
        # (completed, failed, or timed out) even though the round continues.
        if high_reward != 0:
            previous_state['done'] = 1
        else:
            previous_state['done'] = int(new_game_state is None)
        previous_state['high_done'] = int(new_game_state is None)

        if previous_state['done'] == 1:
            self.high_memory[-1]['reward'] = previous_state['high_reward']
            self.high_memory[-1]['done'] = previous_state['high_done']
            # Sub-goal finished: make a new high-level decision next step.
            self.start_high = True

    def sample_low(self, batch_size):
        """Sample a training batch for the low-level agent.

        Returns (as np.ndarrays):
        observation: [B, C, H, W]
        action: [B]
        target: [B, 6]
        reward: [B]
        next_observation: [B, C, H, W]
        next_target: [B, 6]
        done: [B]
        """
        # The helpers below (_frames_observation, _caculate_accumulated_reward)
        # read self.memory, so alias it to the low-level buffer first.
        self.memory = self.low_memory
        # Clamp to a non-negative size so an almost-empty buffer yields an
        # empty batch instead of a ValueError from random.sample.
        batch_size = max(0, min(len(self.memory) - self.steps - 1, batch_size))
        observations, actions, targets, rewards, next_observations, next_targets, dones = [], [], [], [], [], [], []

        chosen_frame_ids = random.sample(range(len(self.memory) - self.steps - 1), batch_size) if batch_size > 0 else []
        for frame_id in chosen_frame_ids:

            reward, next_frame_id = self._caculate_accumulated_reward(frame_id)
            observations.append(self._frames_observation(frame_id))
            actions.append(ACTIONS.index(self.memory[frame_id]['action']))
            rewards.append(reward)
            next_observations.append(self._frames_observation(next_frame_id))
            dones.append(self.memory[next_frame_id - 1]['done'])

            targets.append(self.memory[frame_id]['target']['target_code'])
            next_targets.append(self.memory[next_frame_id]['target']['target_code'])

        return np.array(observations), np.array(actions), np.array(targets), np.array(rewards),\
                np.array(next_observations), np.array(next_targets), np.array(dones)

    def sample_high(self, batch_size):
        """Sample a training batch for the high-level agent.

        Returns (as np.ndarrays):
        observation: [B, C, H, W]
        target: [B, 4]
        reward: [B]
        next_observation: [B, C, H, W]
        next_target: [B, 4]
        done: [B]
        """
        # Alias self.memory to the high-level buffer for the shared helpers.
        self.memory = self.high_memory
        batch_size = max(0, min(len(self.memory) - self.steps - 1, batch_size))
        observations, targets, rewards, next_observations, next_targets, dones = [], [], [], [], [], []
        chosen_frame_ids = random.sample(range(len(self.memory) - self.steps - 1), batch_size) if batch_size > 0 else []
        for frame_id in chosen_frame_ids:

            reward, next_frame_id = self._caculate_accumulated_reward(frame_id)
            observations.append(self._frames_observation(frame_id))
            rewards.append(reward)
            next_observations.append(self._frames_observation(next_frame_id))
            dones.append(self.memory[next_frame_id - 1]['done'])

            state = self.memory[frame_id]
            next_state = self.memory[next_frame_id]
            targets.append(self._candidate_target_code(state['game_state'], state['target']['target_name'], state['target']['target_position']))
            next_targets.append(self._candidate_target_code(next_state['game_state'], next_state['target']['target_name'], next_state['target']['target_position']))

        return np.array(observations), np.array(targets), np.array(rewards),\
            np.array(next_observations), np.array(next_targets), np.array(dones)

    def _frames_observation(self, frame_id):
        """Stack up to ``stack_frames`` observations ending at ``frame_id``.

        Frames are gathered backwards in time and concatenated along the
        channel axis; collection stops at an episode boundary (done == 1)
        and the last gathered frame is repeated to pad to full depth.
        """
        frames = []
        for i in range(frame_id, max(-1, frame_id - self.stack_frames), -1):
            frames.append(self.memory[i]['observation'])
            if self.memory[i]['done'] == 1:
                break

        while len(frames) < self.stack_frames:
            frames.append(frames[-1])

        frames = np.concatenate(frames, axis=0)
        return frames

    def _caculate_accumulated_reward(self, frame_id):
        """Compute the discounted n-step return starting at ``frame_id``.

        Returns (reward, next_frame_id) where ``next_frame_id`` is the frame
        the n-step bootstrap should target.  ``short_reward`` (dense shaping)
        is discounted separately with ``short_gamma``.
        """
        # Advance next_frame_id up to `steps` ahead, stopping early at the
        # buffer tail or an episode boundary.  The final frame of a round is
        # expected to carry no usable observation, hence the len-1 bound.
        next_frame_id = frame_id + 1
        while next_frame_id < len(self.memory) - 1 and self.memory[next_frame_id - 1]['done'] == 0 and \
            next_frame_id - frame_id < self.steps:

            next_frame_id += 1

        _reward = 0
        _short_reward = 0
        # Fold rewards backwards so each step applies one more discount.
        for future_frame_id in range(next_frame_id, frame_id - 1, -1):
            _reward *= self.gamma
            _short_reward *= self.short_gamma
            _reward += self.memory[future_frame_id]['reward']
            _short_reward += self.memory[future_frame_id].get('short_reward', 0)

        return _short_reward + _reward, next_frame_id

    def get_rewards(self, events, target):
        """Map game events to (high_reward, low_reward, short_reward).

        High-level rewards scale inversely with the target's step budget;
        low-level completion rewards scale with how quickly the target was
        reached; short rewards are dense per-step shaping signals.
        """
        high_game_rewards = {
            e.TARGET1_COMPLETED: 50 / (1 + target['max_step']),  # base + speed bonus
            e.TARGET2_COMPLETED: 20 / (1 + target['max_step']),  # base + speed bonus
            e.TARGET3_COMPLETED: 20 / (1 + target['max_step']),
            e.TARGET1_FAIL: 50 / (5 + target['max_step']),
            e.TARGET1_TIMEOUT: 50 / (5 + target['max_step']),
            e.TARGET2_FAIL: 20 / (5 + target['max_step']),
            e.TARGET3_TIMEOUT: 20 / (5 + target['max_step']),
            e.TARGET3_FAIL: 20 / (5 + target['max_step']),
            # e.KILLED_SELF: -5,
            # e.GOT_KILLED: -8,
        }
        # NOTE(review): e.TARGET2_TIMEOUT can be emitted by get_all_events but
        # has no entry in either table, so it yields reward 0 and never marks
        # the sub-goal done -- confirm this is intended.
        low_game_rewards = {
            e.TARGET1_COMPLETED: 3 - 2 * target['cur_step'] / target['max_step'],   # base + speed bonus
            e.TARGET2_COMPLETED: 3 - 2 * target['cur_step'] / target['max_step'],   # base + speed bonus
            e.TARGET3_COMPLETED: 8 - 3 * target['cur_step'] / target['max_step'],
            e.TARGET1_FAIL: -0.1,
            e.TARGET1_TIMEOUT: -0.1,
            e.TARGET2_FAIL: -0.1,
            e.TARGET3_TIMEOUT: -0.1,
            e.TARGET3_FAIL: -0.1,
            e.KILLED_SELF: -15,
            e.GOT_KILLED: -15,
        }
        short_game_rewards = {
            e.CORRECT_DIRECTION: 5,
            e.TARGET2_COMPLETED: 8,
            e.PLACEHOLDER_EVENT: -.1,
            e.WAITED: -0.3,
        }
        high_reward, low_reward, short_reward = 0, 0, 0
        for event in events:
            high_reward += high_game_rewards.get(event, 0)
            low_reward += low_game_rewards.get(event, 0)
            short_reward += short_game_rewards.get(event, 0)
        return high_reward, low_reward, short_reward

    def get_all_events(self, old_game_state, target, action, new_game_state, events=None):
        """Augment the framework ``events`` with target-specific events.

        Derives completion/failure/timeout events for the current sub-goal by
        comparing the old and new game states, plus a CORRECT_DIRECTION event
        when the chosen action follows the planned path.
        """
        if events is None:
            events = set()
        else:
            events = set(events)

        if target['target_name'] is None:
            return list(events)

        # Did the agent follow the planned route?
        if len(target['target_direction']) > 0 and action == target['target_direction'][0]:
            events.add(e.CORRECT_DIRECTION)

        # Coin-eating target
        if target['target_name'] == 'eat_coin':
            # We picked up the coin.
            if target['target_position'] in old_game_state['coins'] and new_game_state['self'][3] == target['target_position']:
                events.add(e.TARGET1_COMPLETED)
            # Another agent took the coin first.
            if target['target_position'] in old_game_state['coins'] and target['target_position'] not in new_game_state['coins'] and\
                 new_game_state['self'][3] != target['target_position']:
                events.add(e.TARGET1_FAIL)
            # Task 1 ran out of its step budget.
            if target['cur_step'] > target['max_step']:
                events.add(e.TARGET1_TIMEOUT)

        # Crate-bombing target
        elif target['target_name'] == 'bomb_crate':
            # We dropped a bomb that can reach the crate.
            if target['can_bomb_target'] and action == 'BOMB' and e.INVALID_ACTION not in events:
                events.add(e.TARGET2_COMPLETED)
            # Someone else destroyed the crate.
            if new_game_state['field'][target['target_position'][0], target['target_position'][1]] == 0:
                events.add(e.TARGET2_FAIL)
            # Task 2 ran out of its step budget.
            if target['cur_step'] > target['max_step']:
                events.add(e.TARGET2_TIMEOUT)

        # Bomb-another-agent target
        else:
            if target['can_bomb_target'] and action == 'BOMB' and e.INVALID_ACTION not in events:
                events.add(e.TARGET3_COMPLETED)

            if target['cur_step'] > target['max_step']:
                events.add(e.TARGET3_TIMEOUT)

        events = list(events)
        return events

    def get_all_next_targets(self, game_state):
        """Enumerate all candidate high-level targets reachable right now.

        Candidates are reachable coins, intact crates outside blast areas,
        and reachable opposing agents.  A dummy coin target at (1, 1) is
        returned when nothing is reachable (e.g. boxed in by bombs) so the
        pipeline always has at least one option.
        """
        next_targets = []
        H, W = game_state['field'].shape
        self_x = game_state['self'][3][0]
        self_y = game_state['self'][3][1]
        bomb_map = get_bomb_map(game_state)
        bomb_area = (bomb_map.sum(axis=0) > 0).astype(np.int32)
        # Treat tiles inside a blast radius as blocked for path finding.
        field = (1 - bomb_area) * game_state['field']

        # Reachable coins.
        for coin in game_state['coins']:
            if not can_reach((self_x, self_y), coin, field):
                continue
            target = {
                'target_name': 'eat_coin',
                'target_position': coin,
                'target_high_code': self._candidate_target_code(game_state, 'eat_coin', coin)
            }
            next_targets.append(target)

        # Crates -- skip ones already inside a blast area (about to vanish).
        for x in range(H):
            for y in range(W):
                if game_state['field'][x, y] == 1 and\
                    bomb_area[x, y] == 0 and\
                     can_reach((self_x, self_y), (x, y), field):
                    target = {
                        'target_name': 'bomb_crate',
                        'target_position': (x, y),
                        'target_high_code': self._candidate_target_code(game_state, 'bomb_crate', (x, y))
                    }
                    next_targets.append(target)

        # Reachable opposing agents.
        for other in game_state['others']:
            if not can_reach((self_x, self_y), other[3], field):
                continue
            target = {
                'target_name': f'agent_{other[0]}',
                'target_position': other[3],
                'target_high_code': self._candidate_target_code(game_state, f'agent_{other[0]}', other[3])
            }
            next_targets.append(target)

        if len(next_targets) == 0:
            # Nothing reachable (e.g. a bomb blocks every path near round
            # end): fall back to a dummy target so the round can continue.
            target = {
                'target_name': 'eat_coin',
                'target_position': (1, 1),
                'target_high_code': [0, 1, 1, 10]
            }
            next_targets.append(target)

        return next_targets

    def _update_target(self, state):
        """Refresh the per-step bookkeeping of ``state['target']`` in place.

        Updates the target position (for moving agent targets), whether a
        bomb dropped here would hit the target, the planned path, the step
        counter, and the low-level target encoding.
        """
        target = state['target']
        game_state = state['game_state']
        if len(self.low_memory) > 0 and self.low_memory[-1]['high_done'] == 0:
            pre_game_state = self.low_memory[-1]['game_state']
        else:
            pre_game_state = None

        self_x, self_y = game_state['self'][3]
        target_name = target['target_name']
        if target_name != 'eat_coin' and target_name != 'bomb_crate':
            # Agent targets move: refresh the stored position from the
            # current game state so path/bomb checks below use it.
            bomb_agent_name = target_name.replace('agent_', '')
            for agent in game_state['others']:
                if agent[0] == bomb_agent_name:
                    target['target_position'] = agent[3]

        target_x, target_y = target['target_position']

        # A bomb dropped here hits the target if it is within blast range
        # (Manhattan distance <= 3 on the same row/column) with no stone
        # wall (-1) in between.
        can_bomb_target = True
        if self_x == target_x and abs(self_y - target_y) <= 3:
            for y in range(min(self_y, target_y), max(self_y, target_y) + 1):
                if game_state['field'][self_x, y] == -1:
                    can_bomb_target = False
        elif self_y == target_y and abs(self_x - target_x) <= 3:
            for x in range(min(self_x, target_x), max(self_x, target_x) + 1):
                if game_state['field'][x, self_y] == -1:
                    can_bomb_target = False
        else:
            can_bomb_target = False
        target['can_bomb_target'] = can_bomb_target

        target['target_direction'] = search_target_path(game_state, target, pre_game_state, self.next_bomb_time)

        target['cur_step'] += 1

        target['target_code'] = self._target_code(game_state, target)

    def _candidate_target_code(self, game_state, target_name, target_position):
        """Encode a candidate target as [type, x, y, Manhattan distance].

        Type is 0 for coins, 1 for crates, 2 for enemy agents.
        """
        if target_name == 'eat_coin':
            target_type = 0
        elif target_name == 'bomb_crate':
            target_type = 1
        else:
            target_type = 2
        target_x, target_y = target_position
        name, score, bombs_left, (self_x, self_y) = game_state['self']
        distance = abs(target_x - self_x) + abs(target_y - self_y)
        return [target_type, target_x, target_y, distance]

    def _target_code(self, game_state, target):
        """Encode the active target for the low-level network.

        Returns [type, x, y, Manhattan distance, can_bomb_target,
        next-action id]; action id 4 (WAIT) is used when no path exists.
        """
        target_x = target['target_position'][0]
        target_y = target['target_position'][1]

        if target['target_name'] == 'eat_coin':
            target_type = 0
        elif target['target_name'] == 'bomb_crate':
            target_type = 1
        else:
            target_type = 2

        name, score, bombs_left, (self_x, self_y) = game_state['self']
        distance = abs(target_x - self_x) + abs(target_y - self_y)

        can_bomb_target = target['can_bomb_target']

        if len(target['target_direction']) > 0:
            action_id = ACTIONS.index(target['target_direction'][0])
        else:
            action_id = 4  # WAIT

        target_code = [target_type, target_x, target_y, distance, int(can_bomb_target), action_id]

        return target_code

