import math 
from constants import * 
import gym
import numpy as np 
import os 
import cv2
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
from game import Game

class SnakeEnv(gym.Env):
    """Gym wrapper around the Snake ``Game``.

    Observations are RGB ``uint8`` images of the board, with each grid cell
    rendered as a 7x7 pixel patch. Actions are ``Discrete(4)``:
    0 UP, 1 LEFT, 2 RIGHT, 3 DOWN. Uses the classic gym API:
    ``reset() -> obs`` and ``step(a) -> (obs, reward, done, info)``.
    """

    def __init__(self, seed=0, silent_mode=True, limit_step=True):
        """
        Args:
            seed: unused here; kept for interface compatibility.
            silent_mode: forwarded to ``Game``; when False, the victory
                sound is played on a win.
            limit_step: when True, an episode ends after ``grid_num * 4``
                steps without food; otherwise the cap is effectively infinite.
        """
        super().__init__()
        self.game = Game(silent_mode=silent_mode)
        self.game.reset()
        self.silent_mode = silent_mode

        self.action_space = gym.spaces.Discrete(4)  # 0: UP, 1: LEFT, 2: RIGHT, 3: DOWN

        # Raw board pixels. NOTE(review): this hard-coded shape must equal
        # (height_cell_num * 7, width_cell_num * 7, 3), i.e. a 22x40 grid —
        # confirm against the Game map dimensions.
        self.observation_space = gym.spaces.Box(
            low=0, high=255,
            shape=(154, 280, 3),
            dtype=np.uint8,
        )

        self.width_cell_num = self.game.map.width
        self.height_cell_num = self.game.map.height
        self.grid_num = self.width_cell_num * self.height_cell_num
        self.init_snake_length = len(self.game.snake)
        # Maximum number of cells the snake can still grow by.
        self.max_growth = self.grid_num - self.init_snake_length

        # True once the current episode has ended.
        self.die = False

        # Without a step limit an agent could circle forever without eating.
        if limit_step:
            self.step_limit = self.grid_num * 4
        else:
            self.step_limit = 10 ** 9  # effectively unlimited

        # Steps taken since the last food pickup (or since episode start).
        self.reward_step_counter = 0

    def reset(self):
        """Reset the underlying game and return the first observation."""
        self.game.reset()

        self.die = False
        self.reward_step_counter = 0

        return self._generate_observation()

    def step(self, action):
        """Advance the game by one action and compute the shaped reward.

        ``info`` returned by ``Game.step``:
            {"snake_length": int,
             "snake_head_position": np.array,
             "prev_snake_head_position": np.array,
             "bait_pos": np.array,
             "bonus_pos": np.array or None,
             "food_obtained": bool}

        Returns:
            (obs, reward, done, info) per the classic gym API.
        """
        self.die, info = self.game.step(action)
        obs = self._generate_observation()

        reward = 0.0
        self.reward_step_counter += 1

        if info["snake_length"] == self.grid_num:
            # Snake fills up the entire board: victory, episode over.
            reward = self.max_growth * 0.1
            self.die = True
            if not self.silent_mode:
                self.game.sound_victory.play()
            return obs, reward, self.die, info

        if self.reward_step_counter > self.step_limit:
            # Too many steps without eating: end the episode.
            self.reward_step_counter = 0
            self.die = True

        if self.die:
            # Snake bumped into a wall/itself (or hit the step limit).
            # Penalty shrinks as the snake grows: magnitude is
            # max_growth ** ((grid_num - length) / max_growth), i.e. in
            # (1, max_growth), then scaled by 0.1.
            reward = -math.pow(self.max_growth, (self.grid_num - info["snake_length"]) / self.max_growth)
            reward = reward * 0.1
            return obs, reward, self.die, info

        if info["food_obtained"]:
            # Food eaten: reward grows with snake length.
            reward = info["snake_length"] / self.grid_num
            self.reward_step_counter = 0  # reset the no-food step counter
        else:
            # Small shaping signal based on whether the head moved toward
            # the bait (and the bonus, when present); not meant to compete
            # with the game-over penalty or the food reward.
            if np.linalg.norm(info["snake_head_position"] - info["bait_pos"]) < np.linalg.norm(info["prev_snake_head_position"] - info["bait_pos"]):
                reward = 1 / info["snake_length"]
            else:
                reward = -1 / info["snake_length"] - 2

            if info["bonus_pos"] is not None:
                if np.linalg.norm(info["snake_head_position"] - info["bonus_pos"]) < np.linalg.norm(info["prev_snake_head_position"] - info["bonus_pos"]):
                    reward += 1 / info["snake_length"] + 10
                else:
                    reward += -1 / info["snake_length"]
            reward = reward * 0.1

        return obs, reward, self.die, info

    def render(self):
        """Delegate rendering to the underlying game."""
        self.game.render()

    def get_action_mask(self):
        """Return a (1, n_actions) boolean array: True where the action is valid."""
        return np.array([[self._check_action_validity(a) for a in range(self.action_space.n)]])

    def _check_action_validity(self, action):
        """Return False when ``action`` reverses the current direction or
        would collide with the wall, an obstacle, or the snake body on the
        next step; True otherwise."""
        current_direction = self.game.snake.direction
        snake_list = self.game.snake.positions
        row, col = snake_list[0]
        if action == UP:
            if current_direction == DOWN:
                return False
            row -= 1
        elif action == LEFT:
            if current_direction == RIGHT:
                return False
            col -= 1
        elif action == RIGHT:
            if current_direction == LEFT:
                return False
            col += 1
        elif action == DOWN:
            if current_direction == UP:
                return False
            row += 1

        # If the next cell holds the bait, the snake grows and the tail cell
        # is NOT freed this step, so the full body blocks movement; otherwise
        # the tail is popped and its cell may be stepped into.
        # BUGFIX: the original compared the (row, col) tuple against the bait
        # *object* (`self.game.bait`), which never equals a tuple unless Bait
        # defines __eq__, so the "grows" branch could never fire. Compare
        # against its .position (the grid coordinate, as used in
        # _generate_observation) instead.
        eats_bait = (row, col) == self.game.bait.position
        body_blocking = snake_list if eats_bait else snake_list[:-1]
        game_over = (
            (row, col) in body_blocking
            or row < 0
            or row >= self.height_cell_num
            or col < 0
            or col >= self.width_cell_num
            or self.game.map.is_obstacle((row, col))
        )
        return not game_over

    def _generate_observation(self):
        """Render the game state into an RGB ``uint8`` image.

        Colors: empty BLACK; snake body a GRAY gradient (bright head, dim
        tail); head GREEN; tail RED; obstacles WHITE; bait LURE; bonus GOLD.
        Each grid cell is upscaled to a 7x7 pixel patch.

        NOTE(review): this method indexes ``self.game.snake`` directly,
        while _check_action_validity uses ``self.game.snake.positions`` —
        presumably Snake supports both; confirm against the Snake class.
        """
        obs = np.zeros((self.game.height_cell_num, self.game.width_cell_num), dtype=np.uint8)

        # Snake body as a gray ramp, fading linearly from 200 at the head to 50 at the tail.
        obs[tuple(np.transpose(self.game.snake))] = np.linspace(200, 50, len(self.game.snake), dtype=np.uint8)

        # Stack the single layer into a 3-channel image.
        obs = np.stack((obs, obs, obs), axis=-1)

        # Overlay head (green), tail (red), and obstacles (white).
        obs[tuple(self.game.snake[0])] = GREEN
        obs[tuple(self.game.snake[-1])] = RED
        for obstacle in self.game.map.obstacles:
            obs[tuple(obstacle)] = WHITE
        # Bait and (optional) bonus pickups.
        obs[self.game.bait.position] = LURE
        if self.game.bonus:
            obs[self.game.bonus.position] = GOLD

        # Upscale each grid cell to a 7x7 pixel patch. (The original comment
        # claimed "84x84"; the actual factor here is 7 per axis.)
        obs = np.repeat(np.repeat(obs, 7, axis=0), 7, axis=1)
        return obs
        

# Test the environment using random actions.
NUM_EPISODES = 100
RENDER_DELAY = 0.001
# Set True to display each observation with matplotlib. Note that
# plt.show() blocks until the window is closed, so this stalls the rollout.
SHOW_OBS = False
import time
from matplotlib import pyplot as plt

if __name__ == "__main__":
    env = SnakeEnv(silent_mode=True)

    # BUGFIX: the original ran `num_success += env.reset()` in a loop, but
    # reset() returns the observation ndarray, not a success flag, so the
    # printed "success rate" was a meaningless array. That loop is removed.

    sum_reward = 0.0

    # 0: UP, 1: LEFT, 2: RIGHT, 3: DOWN
    for _ in range(NUM_EPISODES):
        obs = env.reset()
        done = False
        while not done:
            if SHOW_OBS:
                plt.imshow(obs, interpolation='nearest')
                plt.show()
            action = env.action_space.sample()
            obs, reward, done, info = env.step(action)
            sum_reward += reward
            env.render()
            time.sleep(RENDER_DELAY)
        print("sum_reward: %f" % sum_reward)
        print("episode done")

    env.close()
    print("Average episode reward for random strategy: {}".format(sum_reward / NUM_EPISODES))
