#!/usr/bin/env python3
# -*- coding:utf-8 -*-

"""
@Project :gorge_walk
@File    :definition.py
@Author  :kaiwu
@Date    :2022/12/15 22:50

"""

import numpy as np
from kaiwu_agent.utils.common_func import create_cls, attached
from kaiwu_agent.utils.common_func import Frame
from diy.config import Config 

# Discount factor used when folding a trajectory into an n-step return
# (see sample_process below).
gamma = Config.GAMMA

# One environment transition: (state, action, reward, next_state, done).
SampleData = create_cls("SampleData", state=None, action=None, reward=None, next_state=None, done=None)


# Processed observation handed to the model (built by observation_process).
ObsData = create_cls("ObsData", feature=None)


# Wrapper for the action chosen by the model (unwrapped by action_process).
ActData = create_cls("ActData", act=None)


@attached
def observation_process(raw_obs):
    """Build the agent's feature vector from a raw observation.

    Keeps raw_obs[0] followed by the raw_obs[129:250] segment
    (122 values in total when the raw vector has full length).
    """
    selected = [raw_obs[0]] + list(raw_obs[129:250])
    return ObsData(feature=np.array(selected))


@attached
def action_process(act_data):
    """Unwrap an ActData into the raw action value fed to the environment."""
    raw_action = act_data.act
    return raw_action


@attached
def sample_process(list_game_data):
    """Collapse a list of consecutive transitions into one n-step sample.

    The n-step return is sum_i gamma**i * r_i over the whole window.  The
    bootstrap (next_state, done) defaults to the last transition, but if
    any transition inside the window terminated, that terminal transition
    supplies them instead (when several are flagged done, the last wins,
    matching the original accumulation order).

    :param list_game_data: frame objects with state/action/reward/
        next_state/done attributes, oldest first
    :return: a single-element list holding one n-step SampleData
        (empty list for empty input)
    """
    if not list_game_data:
        return []

    samples = [SampleData(**frame.__dict__) for frame in list_game_data]

    # Discounted n-step return over every transition in the window.
    n_step_reward = sum(gamma ** i * s.reward for i, s in enumerate(samples))

    # Default bootstrap target: tail of the window...
    n_step_next = samples[-1].next_state
    n_step_done = samples[-1].done
    # ...unless some transition terminated inside the window.
    for s in samples:
        if s.done:
            n_step_next, n_step_done = s.next_state, s.done

    head = samples[0]
    # NOTE(review): in-place /16 looks like normalization of the first
    # state component -- confirm against the observation encoding.
    head.state[0] = head.state[0] / 16
    n_step_next[0] = n_step_next[0] / 16

    frame = Frame(
        state=head.state,
        action=head.action,
        reward=n_step_reward,
        next_state=n_step_next,
        done=n_step_done,
    )
    return [SampleData(**frame.__dict__)]


def reward_shaping(frame_no, score, terminated, truncated, obs, _obs):
    """Shape the environment reward for a single step.

    :param frame_no: current frame counter
    :param score: raw environment score for this step
    :param terminated: True when the episode ended by reaching the goal
    :param truncated: True when the episode hit the step limit
    :param obs: current observation vector (length >= 250)
    :param _obs: previous observation vector (same layout as obs)
    :return: shaped scalar reward
    """
    reward = 0

    # Reward 1: winning the episode.
    if terminated:
        reward += score * 20

    # Reward 2: moving relative to the finish line.
    # obs[129] is presumably a goal progress/distance feature -- confirm
    # against the observation layout.  NOTE: with tabular Q-learning this
    # term reportedly made the agent crash right before the goal.
    if not terminated and obs[129] > _obs[129]:
        reward += 10
    if not terminated and obs[129] < _obs[129]:
        reward -= 10

    # Reward 3: collecting a treasure chest.
    if score > 0 and not terminated:
        reward += score * 2

    # Reward 4: progress w.r.t. the nearest still-available treasure.
    # obs[130:140] are treasure distance features, obs[240:250] their
    # availability flags.
    # BUGFIX: the old code used np.argmin over the *masked* distance
    # array (an index in 0..9) to index obs/_obs directly, comparing
    # unrelated features; it also raised ValueError on an empty mask when
    # every treasure had been taken.  Map the argmin back to the real
    # obs offset (130 + i) and skip when none is available.
    if not terminated:
        dists = np.asarray(obs[130:140])
        flags = np.asarray(obs[240:250])
        avail = np.where(flags == 1)[0]
        if avail.size > 0:
            nearest = 130 + int(avail[np.argmin(dists[avail])])
            # NOTE(review): '>' rewards an increasing feature value here;
            # verify the feature grows when approaching the treasure.
            if obs[nearest] > _obs[nearest]:
                reward += 1

    # Reward 5: per-step penalty, harsher once past a reasonable budget.
    if not terminated:
        reward += -1
        rational_step = 1500
        if frame_no >= rational_step:
            reward -= 3

    # Reward 6 (disabled): wall-bump penalty when obs[0] == _obs[0].

    # Reward 7: timeout penalty.
    if truncated:
        reward += -500

    # Reward 8 (disabled): bonus for first sighting the goal area, with a
    # matching penalty to stop farming it by repeatedly re-entering.

    # Reward 9: penalize revisiting the current cell; small bonus for
    # stepping onto a never-visited one.
    if not terminated:
        memory = _obs[215:240]  # visit counters for the local view
        current_position_time = memory[12]  # the agent's own cell
        reward -= current_position_time * 10 if current_position_time > 0 else -1

    return reward

# Every position in the encode/decode functions below must stay aligned,
# otherwise the round-tripped sample data will be corrupted.
@attached
def SampleData2NumpyData(g_data):
    """Flatten one SampleData into a single float32 numpy vector.

    Layout: [state | next_state | action | reward | done] -- must stay in
    lockstep with NumpyData2SampleData, which decodes by position.
    """
    fields = (g_data.state, g_data.next_state, g_data.action, g_data.reward, g_data.done)
    return np.hstack(tuple(np.array(f, dtype=np.float32) for f in fields))

@attached
def NumpyData2SampleData(s_data):
    """Decode a flat numpy vector back into a SampleData.

    Inverse of SampleData2NumpyData; the positional layout of the two
    functions must stay aligned.
    """
    size = Config.STATE_SIZE
    return SampleData(
        state=s_data[0:size],
        next_state=s_data[size:2 * size],
        action=s_data[-3],
        reward=s_data[-2],
        done=s_data[-1],
    )
