#!/usr/bin/env python3
# -*- coding:utf-8 -*-

"""
@Project :gorge_walk
@File    :agent.py
@Author  :kaiwu
@Date    :2022/11/11 12:47

"""

import kaiwu_agent
from kaiwu_agent.agent.base_agent import BaseAgent
import numpy as np
import os
import torch
import torch.nn.functional as F
from diy.model import model
from diy.feature.definition import ActData
from kaiwu_agent.agent.base_agent import (
    learn_wrapper,
    save_model_wrapper,
    load_model_wrapper,
    predict_wrapper,
    exploit_wrapper,
    check_hasattr,
)
from diy.config import Config


class Agent(BaseAgent):
    """Double-DQN agent with n-step returns for the gorge_walk task.

    Holds an online network (`q_net`) and a periodically-synced target
    network (`target_net`); action selection is epsilon-greedy restricted
    to the legal (non-blocked) move directions.
    """

    # Feature indices flagging an obstacle in the four move directions
    # (presumably up/down/left/right — confirm against feature definition);
    # a feature value of 1 means blocked, so (1 - value) == 1 means legal.
    _OBSTACLE_FEATURE_IDX = [19, 23, 25, 29]

    def __init__(self, agent_type="player", device=None, logger=None, monitor=None) -> None:
        super().__init__(agent_type, device, logger, monitor)
        self.logger = logger
        self.n_step = Config.N_STEP
        self.state_size = Config.STATE_SIZE
        self.action_size = Config.ACTION_SIZE
        self.learning_rate = Config.LEARNING_RATE
        self.gamma = Config.GAMMA
        self.epsilon = Config.EPSILON
        self.episodes = Config.EPISODES
        self.target_update = Config.TARGET_UPDATE
        # Number of learn() steps performed; drives target-net sync cadence.
        self.count = 0
        self.q_net = model.Model(self.state_size, self.action_size)
        self.target_net = model.Model(self.state_size, self.action_size)
        self.optimizer = torch.optim.Adam(self.q_net.parameters(), lr=self.learning_rate)

    @predict_wrapper
    def predict(self, list_obs_data):
        """Epsilon-greedy action for training-time prediction.

        Returns a single-element list of ActData, matching the wrapper's
        batch-of-one convention.
        """
        state = list_obs_data[0].feature
        # 1 where the direction is passable, 0 where an obstacle blocks it.
        legal_action = 1 - state[self._OBSTACLE_FEATURE_IDX]
        state = torch.tensor(state)
        action = self._epsilon_greedy(state=state, legal_action=legal_action)
        return [ActData(act=action)]

    @exploit_wrapper
    def exploit(self, list_obs_data):
        """Action for evaluation, with a fixed 0.2 exploration rate."""
        state = list_obs_data[0].feature
        legal_action = 1 - state[self._OBSTACLE_FEATURE_IDX]
        state = torch.tensor(state)
        action = self._epsilon_greedy(state=state, legal_action=legal_action, epsilon=0.2)
        return [ActData(act=action)]

    def _epsilon_greedy(self, state, legal_action, epsilon=None):
        """Pick a legal action: random with probability `epsilon`, else greedy.

        Args:
            state: 1-D feature tensor for a single observation.
            legal_action: array-like of 0/1 legality flags, one per action.
            epsilon: exploration rate; defaults to self.epsilon.

        Returns:
            int action index in the FULL action space.
        """
        if epsilon is None:
            epsilon = self.epsilon
        if np.random.rand() <= epsilon:
            action = np.random.choice(np.where(legal_action == 1)[0])
        else:
            with torch.no_grad():
                # unsqueeze: the model expects a batch dimension.
                q_values = self.q_net(state.unsqueeze(0)).flatten()
            legal_mask = torch.as_tensor(legal_action, dtype=torch.bool)
            # BUGFIX: indexing q_values[legal == 1] and taking argmax returns
            # an index into the *filtered* subset, silently renumbering
            # actions. Masking illegal actions to -inf keeps original ids.
            masked_q = q_values.masked_fill(~legal_mask, float("-inf"))
            action = int(torch.argmax(masked_q).item())
        return action

    @learn_wrapper
    def learn(self, sample_data):
        """One Double-DQN update from an n-step transition batch.

        Target: r_n + gamma^n * Q_target(s', argmax_a Q_online(s', a)),
        masked to 0 beyond terminal states.
        """
        sample_data = sample_data[0]
        state, n_step_next, action, n_step_reward, n_step_done = (
            sample_data.state,
            sample_data.next_state,
            sample_data.action,
            sample_data.reward,
            sample_data.done,
        )
        state = torch.tensor(state).view(-1, self.state_size)
        action = torch.tensor(action).view(-1, 1)
        # Explicit float32 so arithmetic below never hits bool/int tensors
        # (`1 - done` raises on bool tensors in recent torch versions).
        n_step_reward = torch.tensor(n_step_reward, dtype=torch.float32).view(-1, 1)
        n_step_next = torch.tensor(n_step_next).view(-1, self.state_size)
        n_step_done = torch.tensor(n_step_done, dtype=torch.float32).view(-1, 1)

        all_action_q_values = self.q_net(state).view(-1, self.action_size)
        q_values = all_action_q_values.gather(1, action.long())

        # Double DQN: the ONLINE net picks the best next action...
        # (BUGFIX: previously argmax was taken over q_net(state), i.e. the
        # current state, which is neither DQN nor Double DQN.)
        with torch.no_grad():
            best_next_action = self.q_net(n_step_next).max(1)[1].view(-1, 1)
            # ...and the TARGET net evaluates it.
            max_n_step_q_values = self.target_net(n_step_next).gather(1, best_next_action.long())
        mask = 1 - n_step_done  # zero out bootstrap term past episode end
        q_targets = n_step_reward + self.gamma**self.n_step * max_n_step_q_values * mask
        dqn_loss = torch.mean(F.mse_loss(q_values, q_targets))

        self.optimizer.zero_grad()
        dqn_loss.backward()
        self.optimizer.step()

        # Periodically hard-sync the target network with the online one.
        if self.count % self.target_update == 0:
            self.target_net.load_state_dict(self.q_net.state_dict())
        self.count = self.count + 1

    @save_model_wrapper
    def save_model(self, path=None, id="1"):
        # To save the model, it can consist of multiple files,
        # and it is important to ensure that each filename includes the "model.ckpt-id" field.
        model_file_path = f"{path}/model.ckpt-{str(id)}.pkl"
        # Saves the whole module object (pickle), not just the state_dict.
        torch.save(self.q_net, model_file_path)
        self.logger.info(f"save model {model_file_path} successfully")

    @load_model_wrapper
    def load_model(self, path=None, id="1"):
        # When loading the model, you can load multiple files,
        # and it is important to ensure that each filename matches the one used during save_model.
        model_file_path = f"{path}/model.ckpt-{str(id)}.pkl"
        try:
            # NOTE(review): torch.load unpickles arbitrary objects — only
            # load checkpoints from trusted sources.
            self.q_net = torch.load(model_file_path)
            self.logger.info(f"load model {model_file_path} successfully")
        except FileNotFoundError:
            self.logger.info(f"File {model_file_path} not found")
            exit(1)
