import copy
import os
import random
from typing import Dict, List, Tuple
import gymnasium as gym

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os, pickle, json, socket
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class ReplayBuffer:
    """Fixed-capacity numpy-backed experience replay buffer.

    Transitions (obs, act, rew, next_obs, done) are written into
    pre-allocated arrays; once full, the oldest entries are overwritten
    (circular buffer).
    """

    def __init__(self, obs_dim: int, act_dim: int, size: int, batch_size: int = 32):
        # Allocate all storage up front so store() never resizes.
        self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.next_obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
        self.rews_buf = np.zeros([size], dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        self.max_size = size
        self.batch_size = batch_size
        self.ptr = 0   # next write slot
        self.size = 0  # number of valid entries (<= max_size)

    def store(
        self,
        obs: np.ndarray,
        act: np.ndarray,
        rew: float,
        next_obs: np.ndarray,
        done: bool,
    ):
        """Insert one transition at the write pointer, wrapping when full."""
        # gymnasium reset() returns (obs, info); keep only the observation.
        obs = obs[0] if isinstance(obs, tuple) else obs
        next_obs = next_obs[0] if isinstance(next_obs, tuple) else next_obs

        idx = self.ptr
        self.obs_buf[idx] = obs
        self.next_obs_buf[idx] = next_obs
        self.acts_buf[idx] = act
        self.rews_buf[idx] = rew
        self.done_buf[idx] = done

        self.ptr = (idx + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self) -> Dict[str, np.ndarray]:
        """Sample `batch_size` distinct transitions uniformly at random.

        Requires len(self) >= batch_size (sampling is without replacement).
        """
        idxs = np.random.choice(self.size, size=self.batch_size, replace=False)
        return {
            "obs": self.obs_buf[idxs],
            "next_obs": self.next_obs_buf[idxs],
            "acts": self.acts_buf[idxs],
            "rews": self.rews_buf[idxs],
            "done": self.done_buf[idxs],
        }

    def __len__(self) -> int:
        return self.size
    

class GaussianNoise:
    """Zero-mean Gaussian exploration noise with linearly decaying sigma.

    Taken from https://github.com/vitchyr/rlkit
    """

    def __init__(
        self,
        action_dim: int,
        min_sigma: float = 1.0,
        max_sigma: float = 1.0,
        decay_period: int = 1000000,
    ):
        """Initialize."""
        self.action_dim = action_dim
        self.max_sigma = max_sigma
        self.min_sigma = min_sigma
        self.decay_period = decay_period

    def sample(self, t: int = 0) -> float:
        """Draw noise; sigma interpolates from max_sigma to min_sigma over decay_period."""
        frac = min(1.0, t / self.decay_period)
        sigma = self.max_sigma + (self.min_sigma - self.max_sigma) * frac
        return np.random.normal(0, sigma, size=self.action_dim)
    

class Actor(nn.Module):
    """Deterministic policy network: maps a state to a tanh-squashed action in [-1, 1]."""

    def __init__(self, in_dim: int, out_dim: int, hidden_num: int = 64):
        """Build a 2-hidden-layer MLP with ReLU activations and a Tanh head."""
        super().__init__()
        layers = [
            nn.Linear(in_dim, hidden_num),
            nn.ReLU(),
            nn.Linear(hidden_num, hidden_num),
            nn.ReLU(),
            nn.Linear(hidden_num, out_dim),
            nn.Tanh(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        """Return the action(s) for the given state batch."""
        return self.net(state)


class Critic(nn.Module):
    """Q-network: maps a (state, action) pair to a scalar value estimate."""

    def __init__(self, in_dim: int, hidden_num: int = 64):
        """Build a 2-hidden-layer MLP; in_dim must equal obs_dim + action_dim."""
        super().__init__()
        layers = [
            nn.Linear(in_dim, hidden_num),
            nn.ReLU(),
            nn.Linear(hidden_num, hidden_num),
            nn.ReLU(),
            nn.Linear(hidden_num, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, state: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
        """Concatenate state and action along dim 1 and return Q(s, a)."""
        x = torch.cat((state, action), dim=1)
        return self.net(x)


class TD3Agent:
    """TD3 agent interacting with the environment.

    Twin Delayed DDPG: a deterministic actor plus `q_num` critics whose
    element-wise minimum forms the bootstrap target (clipped double-Q),
    target-policy smoothing noise, and delayed actor/target updates.

    NOTE(review): `step()` and `train()` write to the module-level globals
    `memory_demo` and `recorder`; this class is not usable standalone.

    Attribute:
        actor (nn.Module): actor model to select actions
        actor_target (nn.Module): target actor used to predict next actions
        actor_optimizer (Optimizer): optimizer for training actor
        q_model (list of nn.Module): critic models to predict state-action values
        q_model_target (list of nn.Module): target critic models
        q_model_optim (list of Optimizer): one optimizer per critic
        memory (ReplayBuffer): replay memory to store transitions
        batch_size (int): batch size for sampling
        gamma (float): discount factor
        tau (float): parameter for soft target update
        initial_random_steps (int): initial random action steps
        exploration_noise (GaussianNoise): gaussian noise for policy
        target_policy_noise (GaussianNoise): gaussian noise for target policy
        target_policy_noise_clip (float): clip target gaussian noise
        device (torch.device): cpu / gpu
        transition (list): temporary storage for the recent transition
        policy_update_freq (int): update actor every time critic updates this times
        total_step (int): total step numbers
        is_test (bool): flag to show the current mode (train / test);
            NOTE(review): set but never read in this file
    """

    def __init__(
        self,
        memory_size: int,
        batch_size: int,
        gamma: float = 0.99,
        tau: float = 5e-3,
        exploration_noise: float = 0.1,
        target_policy_noise: float = 0.2,
        target_policy_noise_clip: float = 0.5,
        initial_random_steps: int = int(1e4),
        policy_update_freq: int = 2,
        obs_dim: int=4,
        action_dim: int=1,
        q_num: int=2,
        hidden_num: int=64,
        env = None,
    ):
        """Initialize."""
        # No-op rebindings kept as-is (harmless).
        obs_dim = obs_dim
        action_dim = action_dim

        self.memory = ReplayBuffer(obs_dim, action_dim, memory_size, batch_size)
        self.batch_size = batch_size
        self.gamma = gamma
        self.tau = tau
        self.initial_random_steps = initial_random_steps
        self.policy_update_freq = policy_update_freq

        # device: cpu / gpu
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(self.device)

        # lr_actor = 3e-4*np.sqrt(batch_size/256.0)# scaling batch size
        # lr_critic = 3e-4*np.sqrt(batch_size/256.0)# scaling batch size
        lr_actor = 3e-4
        lr_critic = 3e-4

        # noise: constant sigma (min == max), so no decay in practice
        self.exploration_noise = GaussianNoise(
            action_dim, exploration_noise, exploration_noise
        )
        self.target_policy_noise = GaussianNoise(
            action_dim, target_policy_noise, target_policy_noise
        )
        self.target_policy_noise_clip = target_policy_noise_clip

        # networks: target actor starts as an exact copy of the online actor
        self.actor = Actor(obs_dim, action_dim, hidden_num).to(self.device)
        self.actor_target = Actor(obs_dim, action_dim, hidden_num).to(self.device)
        self.actor_target.load_state_dict(self.actor.state_dict())

        self.q_num = q_num
        # q function ensemble; N critics to combat overestimation
        self.q_model = [Critic(obs_dim + action_dim, hidden_num=hidden_num).to(self.device) for _ in range(q_num)]

        self.q_model_target = [Critic(obs_dim + action_dim, hidden_num=hidden_num).to(self.device) for _ in range(q_num)]
        for i in range(q_num):
            self.q_model_target[i].load_state_dict(self.q_model[i].state_dict())

        # optimizer: one per critic so each can be stepped independently
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr_actor)
        self.q_model_optim = [optim.Adam(self.q_model[i].parameters(), lr=lr_critic) for i in range(q_num)]

        # transition to store in memory: [state, action, reward, next_state, done]
        self.transition = list()

        # total steps count
        self.total_step = 0

        # update step for actor
        self.update_step = 0

        # mode: train / test
        self.is_test = False

        # gym
        self.env = env

    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Select an action for `state` (uniform random during warm-up, else actor + noise).

        Side effect: starts a new `self.transition = [state, action]` record.
        """
        # if initial random action should be conducted
        if self.total_step < self.initial_random_steps:
            # NOTE(review): assumes a 1-D action space — confirm for other envs
            selected_action = [random.uniform(-1, 1)]
        else:
            # gymnasium reset() returns (obs, info); keep only the observation
            if isinstance(state, tuple):
                state = state[0]

            # [0] picks the first component of the actor output
            # NOTE(review): again assumes action_dim == 1 — verify for other envs
            selected_action = (
                self.actor(torch.FloatTensor(state).to(self.device))[0]
                .detach()
                .cpu()
                .numpy()
            )

        # add noise for exploration during training
        # NOTE(review): noise is added unconditionally; is_test is never checked
        noise = self.exploration_noise.sample()
        selected_action = np.clip(
            selected_action + noise, -1.0, 1.0
        )
        self.transition = [state, selected_action]

        return selected_action

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:
        """Take an action and return the response of the env.

        Completes the pending transition and stores it in replay memory and
        in the module-level `memory_demo` list (for later pretraining).
        """
        # print(f'step action is {action}')
        next_state, reward, terminated, truncated, _= self.env.step(action)
        done = terminated or truncated

        self.transition += [reward, next_state, done]
        self.memory.store(*self.transition)

        # also keep a copy for pretraining (global list; grows unbounded)
        memory_demo.append(tuple(self.transition))

        return next_state, reward, done

    def update_model(self) -> torch.Tensor:
        """Update the model by gradient descent.

        Performs one critic update per call; the actor and targets are
        updated only every `policy_update_freq` environment steps.

        Returns:
            (actor_loss, qf_loss) as detached tensors; actor_loss is zero
            on steps where the actor is not updated.
        """
        device = self.device  # for shortening the following lines

        samples = self.memory.sample_batch()
        states = torch.FloatTensor(samples["obs"]).to(device)
        next_states = torch.FloatTensor(samples["next_obs"]).to(device)
        actions = torch.FloatTensor(samples["acts"]).to(device)
        rewards = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(device)
        dones = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device)
        masks = 1 - dones

        # target policy smoothing: clipped noise on the target action
        # NOTE(review): a single (action_dim,) noise vector is broadcast
        # across the whole batch, so every sample gets the same noise
        noise = torch.FloatTensor(self.target_policy_noise.sample()).to(device)
        clipped_noise = torch.clamp(
            noise, -self.target_policy_noise_clip, self.target_policy_noise_clip
        )

        next_actions = (self.actor_target(next_states) + clipped_noise).clamp(
            -1.0, 1.0
        )

        # current Q estimates Q_i(s, a) for each critic
        q_pred_l = list()
        for i in range(len(self.q_model)):
            q_pred_l.append(self.q_model[i](states, actions))

        # min (Q_1', Q_2', ...) — clipped double-Q target
        # NOTE(review): this uses the ONLINE critics at (s', a'), not the
        # target critics (comment said evaluating (s', a') worked better);
        # q_model_target is only maintained via soft updates, never read here
        for i in range(self.q_num):
            if i == 0:
                q_pred = self.q_model[i](next_states, next_actions)
            else:
                q_pred = torch.min(q_pred, self.q_model[i](next_states, next_actions))

        # G_t   = r + gamma * v(s_{t+1})  if state != Terminal
        #       = r                       otherwise
        q_target = rewards + self.gamma * q_pred * masks

        # critic loss (target detached so gradients flow only through q_pred_l)
        q_loss_l = list()
        for i in range(len(self.q_model)):
            q_loss_l.append( F.mse_loss(q_pred_l[i], q_target.detach()))

        qf_loss = torch.tensor([0.0]).to(device=self.device)
        # train Q functions: optimizing each critic's loss separately
        # worked better empirically than one summed loss
        for i in range(self.q_num):
            self.q_model_optim[i].zero_grad()
            q_loss_l[i].backward()
            self.q_model_optim[i].step()
            qf_loss += q_loss_l[i]

        if self.total_step % self.policy_update_freq == 0:
            # train actor (delayed policy update)
            # fresh forward pass so the actor loss has its own computation graph
            next_actions_actor = self.actor(states)

            q_values = self.q_model[0](states, next_actions_actor)
            for i in range(1, self.q_num):
                q_values = torch.min(q_values, self.q_model[i](states, next_actions_actor))

            # q0 vs q_min: both stabilized above 9.3 (~1980 epochs);
            # q_min reached it faster in the author's runs
            actor_loss = -q_values.mean()

            # actor_loss = -self.q_model[0](states, self.actor(states)).mean()
            # (alternative: stable above 9.3 after ~2000 epochs; both
            # comparisons were run with q_num=10, no lr scaling)

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()

            # target update
            self._target_soft_update()
        else:
            actor_loss = torch.zeros(1)

        return actor_loss.data, qf_loss.data


    def train(self, num_frames: int):
        """Train the agent for `num_frames` environment steps.

        Side effect: appends per-episode scores and losses to the
        module-level `recorder` dict.
        """
        # env reset
        state = self.env.reset()
        score = 0
        i_episode = 0
        frame_count = 0
        # NOTE: loop variable is bound to self.total_step so the step
        # counter advances as a side effect of iteration
        for self.total_step in range(1, num_frames + 1):
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state

            frame_count += 1
            score += reward

            # if episode ends: log mean-per-frame score and reset
            if done:
                i_episode += 1
                recorder['score'].append(score/frame_count)
                print(
                    f'episode is {i_episode}, score is {score/frame_count}')

                state = self.env.reset()
                score = 0
                frame_count = 0

            # if training is ready (enough samples and warm-up finished)
            if (
                len(self.memory) >= self.batch_size
                and self.total_step > self.initial_random_steps
            ):
                actor_loss, critic_loss = self.update_model()
                recorder['actor_loss'].append(actor_loss)
                recorder['critic_loss'].append(critic_loss)


    def _target_soft_update(self) -> None:
        """Soft-update: target = tau*local + (1-tau)*target."""
        tau = self.tau
        for t_param, l_param in zip(
            self.actor_target.parameters(), self.actor.parameters()
        ):
            t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)

        for i in range(self.q_num):
            for t_param, l_param in zip(
                self.q_model_target[i].parameters(), self.q_model[i].parameters()
            ):
                t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)


def save_best_model(path):
    """Save the global agent's policy (actor) weights to `path`.

    Only the actor state dict is persisted — critics and optimizers are
    not, so this checkpoint supports inference, not training resumption.
    Failures are logged instead of raised so a save error at the end of a
    long run does not crash the script.
    """
    try:
        # Tensors are saved on their current device (GPU tensors stay GPU).
        torch.save(obj={
            'policy_net_state_dict': agent.actor.state_dict(),
        }, f=path)
        logger.info("save models success")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; logger.exception also records the traceback
        # instead of silently discarding the cause.
        logger.exception("save models failed.")


import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--q_num', type=int, default=2)       # number of critics in the ensemble
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--width', type=int, default=256)     # hidden layer width
parser.add_argument('--port', type=int, default=10012)    # NOTE(review): parsed but unused in this file
args = parser.parse_args()

HOME = os.getcwd()
# Training-metrics recorder, pickled at the end of the run for plotting.
recorder = dict({
    'score': [],
    'actor_loss': [],
    'critic_loss': [],
})

# Raw transitions collected during training (appended to by agent.step);
# grows unbounded — one entry per environment step.
memory_demo = list()
save_model_path = os.path.join(HOME, 'TD3_model_angle_speed.pt')
recorder_path = os.path.join(HOME, 'TD3_record_angle_speed.pkl')
save_memory_path = os.path.join(HOME, f'pretrain_memory_angle_speed.pkl')

my_env = gym.make('InvertedDoublePendulum-v5', healthy_reward=10, reset_noise_scale=0.1)
# my_env = gym.make('Reacher-v5')  # alternative environment used during experiments
observation_dim = my_env.observation_space.shape[0]
action_dim = my_env.action_space.shape[0]

# parameters
num_frames = 500_000            # total environment steps
memory_size = 300_000           # replay buffer capacity
initial_random_steps = 10_000   # warm-up steps with uniform random actions

agent = TD3Agent(
    memory_size,
    batch_size=args.batch_size,
    initial_random_steps=initial_random_steps,
    obs_dim=observation_dim,
    action_dim=action_dim,
    q_num = args.q_num,
    hidden_num=args.width,
    env = my_env,
)
agent.train(num_frames=num_frames)
# save metrics to file for plotting
with open(recorder_path, 'wb') as f:
    pickle.dump(recorder, f)
# save collected transitions for later pretraining
with open(save_memory_path, 'wb') as f:
    pickle.dump(memory_demo, f)

save_best_model(save_model_path)
print('Complete')