import random
from typing import Dict, List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
import gymnasium as gym
import pickle

class ReplayBuffer:
    """A simple numpy ring-buffer for off-policy transitions.

    Stores (obs, act, rew, next_obs, done) tuples in pre-allocated float32
    arrays and overwrites the oldest entries once capacity is reached.
    """

    def __init__(self, obs_dim: int, action_dim: int, size: int, batch_size: int = 32):
        """Pre-allocate storage.

        Args:
            obs_dim: dimensionality of the observation vector.
            action_dim: dimensionality of the action vector.
            size: maximum number of transitions kept.
            batch_size: number of transitions returned by sample_batch().
        """
        # BUG FIX: the original message printed obs_dim but labelled it batch_size.
        print(f'size is {size}, batch_size is {batch_size}')
        self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.next_obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
        self.acts_buf = np.zeros([size, action_dim], dtype=np.float32)
        self.rews_buf = np.zeros([size], dtype=np.float32)
        self.done_buf = np.zeros([size], dtype=np.float32)
        self.max_size, self.batch_size = size, batch_size
        self.ptr, self.size = 0, 0  # write cursor and current fill level

    def store(
        self,
        obs: np.ndarray,
        act: np.ndarray,
        rew: float,
        next_obs: np.ndarray,
        done: bool,
    ) -> None:
        """Store one transition, overwriting the oldest entry when full."""
        # gymnasium's reset() returns an (obs, info) tuple; unwrap if one
        # slipped through so only the observation array is stored.
        if isinstance(obs, tuple):
            obs = obs[0]
        if isinstance(next_obs, tuple):
            next_obs = next_obs[0]

        self.obs_buf[self.ptr] = obs
        self.next_obs_buf[self.ptr] = next_obs
        self.acts_buf[self.ptr] = act
        self.rews_buf[self.ptr] = rew
        self.done_buf[self.ptr] = done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self) -> Dict[str, np.ndarray]:
        """Sample `batch_size` distinct transitions uniformly at random.

        Raises:
            ValueError: if fewer than `batch_size` transitions are stored
                (np.random.choice with replace=False requires enough items).
        """
        idxs = np.random.choice(self.size, size=self.batch_size, replace=False)
        return dict(obs=self.obs_buf[idxs],
                    next_obs=self.next_obs_buf[idxs],
                    acts=self.acts_buf[idxs],
                    rews=self.rews_buf[idxs],
                    done=self.done_buf[idxs])

    def save(self, path) -> None:
        """Pickle the filled portion of the buffer to `path`."""
        with open(path, 'wb') as f:
            pickle.dump({
                'obs': self.obs_buf[:self.size],
                'acts': self.acts_buf[:self.size],
                'rews': self.rews_buf[:self.size],
                'next_obs': self.next_obs_buf[:self.size],
                'done': self.done_buf[:self.size]
            }, f)

    def __len__(self) -> int:
        """Number of transitions currently stored (<= max_size)."""
        return self.size

# ==== Actor ====
class Actor(nn.Module):
    """Squashed-Gaussian policy head.

    Produces tanh-bounded actions scaled by `max_action`; in training mode
    it also returns the log-probability with the tanh change-of-variables
    correction, in test mode the log-probability is None.
    """

    def __init__(self, state_dim, action_dim, hidden_num=64, max_action=1.0):
        super().__init__()
        # Shared two-layer feature extractor.
        self.trunk = nn.Sequential(
            nn.Linear(state_dim, hidden_num), nn.ReLU(),
            nn.Linear(hidden_num, hidden_num), nn.ReLU()
        )
        # Separate heads for the Gaussian mean and log-std.
        self.mu = nn.Linear(hidden_num, action_dim)
        self.log_std = nn.Linear(hidden_num, action_dim)
        self.max_action = max_action

    def forward(self, state, is_test=False):
        """Return (action, log_prob); log_prob is None when is_test is True."""
        features = self.trunk(state)
        mean = self.mu(features)
        # Clamp log-std into [-5, 2] to keep the distribution numerically sane.
        std = torch.clamp(self.log_std(features), -5, 2).exp()
        dist = Normal(mean, std)

        if is_test:
            # Deterministic evaluation: act at the distribution mean.
            return torch.tanh(mean) * self.max_action, None

        # Reparameterized sample so gradients flow through the action.
        raw_action = dist.rsample()
        squashed = torch.tanh(raw_action)

        # Gaussian log-prob minus the tanh Jacobian correction term.
        log_prob = dist.log_prob(raw_action).sum(-1, keepdim=True)
        log_prob = log_prob - torch.log(1 - squashed.pow(2) + 1e-6).sum(-1, keepdim=True)

        return squashed * self.max_action, log_prob

# ==== Q Network ====
class CriticQ(nn.Module):
    """State-action value network: maps (state, action) to a scalar Q-value."""

    def __init__(self, state_dim, action_dim, hidden_num=64):
        super().__init__()
        # Two hidden ReLU layers followed by a scalar output head.
        layers = [
            nn.Linear(state_dim + action_dim, hidden_num),
            nn.ReLU(),
            nn.Linear(hidden_num, hidden_num),
            nn.ReLU(),
            nn.Linear(hidden_num, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, state, action):
        """Concatenate state and action on the last axis and score the pair."""
        joint = torch.cat((state, action), dim=-1)
        return self.net(joint)

# ==== SAC-N with EDAC Penalty ====
class SACN_EDAC:
    """SAC-N agent with an optional EDAC gradient-diversity penalty.

    Maintains an ensemble of `q_num` critics with clipped (min) targets.
    `eta` scales the EDAC penalty that decorrelates the critics' action
    gradients; eta=0 reduces the algorithm to plain SAC-N.
    """

    def __init__(self, obs_dim, action_dim, env, num_frames: int, random_steps: int,
                 memory_size=100000, batch_size=256,
                 q_num=10, hidden_num=64, gamma=0.99, tau=0.005, policy_update_freq=2,
                 eta=0.0):
        """Build actor, critic ensemble, temperature, and replay buffer."""
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.q_num = q_num
        self.gamma = gamma
        self.eta = eta  # EDAC penalty strength (0 disables the penalty)
        self.tau = tau  # Polyak averaging coefficient for target networks
        self.env = env
        self.batch_size = batch_size
        self.policy_update_freq = policy_update_freq
        self.num_frames = num_frames
        self.random_steps = random_steps
        self.action_dim = action_dim
        # BUG FIX: this previously read the module-level global
        # `observation_dim` instead of the `obs_dim` constructor argument.
        self.observation_dim = obs_dim

        self.actor = Actor(obs_dim, action_dim, hidden_num).to(self.device)
        self.actor_optim = optim.Adam(self.actor.parameters(), lr=3e-4)
        self.q_models = [CriticQ(obs_dim, action_dim, hidden_num).to(self.device) for _ in range(q_num)]
        self.q_optims = [optim.Adam(self.q_models[i].parameters(), lr=1e-3) for i in range(q_num)]
        self.target_q_models = [CriticQ(obs_dim, action_dim, hidden_num).to(self.device) for _ in range(q_num)]
        # Targets start as exact copies of the online critics.
        for target_q, q in zip(self.target_q_models, self.q_models):
            target_q.load_state_dict(q.state_dict())

        # Learned entropy temperature (alpha), optimized in log space.
        self.log_alpha = torch.tensor([0.0], requires_grad=True, device=self.device)
        self.alpha = self.log_alpha.exp()
        self.alpha_optim = optim.Adam([self.log_alpha], lr=1e-3)
        self.target_entropy = -action_dim  # standard SAC heuristic

        self.memory = ReplayBuffer(obs_dim, action_dim, memory_size, batch_size)
        self.total_step = 0

    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Pick an action: uniform random during warm-up, else from the policy."""
        if self.total_step < self.random_steps:
            selected_action = np.random.uniform(-1, 1, size=self.action_dim)
        else:
            # gymnasium's reset() may hand us an (obs, info) tuple; unwrap it.
            if isinstance(state, tuple):
                state = state[0]

            selected_action = self.actor(
                torch.FloatTensor(state).to(self.device)
            )[0].detach().cpu().numpy()

        # Remember (state, action); step() appends (reward, next_state, done).
        self.transition = [state, selected_action]

        return selected_action

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool]:
        """Take an action, store the full transition, and return the env response."""
        next_state, reward, terminated, truncated, _ = self.env.step(action)
        done = terminated or truncated

        self.transition += [reward, next_state, done]
        self.memory.store(*self.transition)

        return next_state, reward, done

    def update_model(self):
        """One SAC-N/EDAC gradient update from a sampled minibatch.

        Returns:
            The scalar critic loss (summed over the ensemble) for logging.
        """
        samples = self.memory.sample_batch()
        s = torch.FloatTensor(samples['obs']).to(self.device)
        ns = torch.FloatTensor(samples['next_obs']).to(self.device)
        # Actions need requires_grad for the EDAC action-gradient penalty.
        a = torch.FloatTensor(samples['acts']).to(self.device).requires_grad_(True)
        r = torch.FloatTensor(samples['rews']).unsqueeze(-1).to(self.device)
        d = torch.FloatTensor(samples['done']).unsqueeze(-1).to(self.device)

        # --- Critic targets: clipped over the ensemble, entropy-regularized ---
        with torch.no_grad():
            next_a, next_logp = self.actor(ns)
            q_targets = torch.stack([target_q(ns, next_a) for target_q in self.target_q_models], dim=0)
            min_q = torch.min(q_targets, dim=0)[0]
            target_q = r + self.gamma * (1 - d) * (min_q - self.alpha * next_logp)

        # --- Per-critic TD losses (+ optional EDAC diversity penalty) ---
        losses = []
        grads = []
        for q_model in self.q_models:
            q_pred = q_model(s, a)
            losses.append(F.mse_loss(q_pred, target_q))
            if self.eta > 0:
                # Normalized gradient of each critic w.r.t. the action;
                # create_graph=True keeps the penalty differentiable.
                grad = torch.autograd.grad(q_pred.sum(), a, retain_graph=True, create_graph=True)[0]
                grads.append(F.normalize(grad, dim=-1))

        critic_loss = torch.stack(losses).sum()
        if self.eta > 0 and self.q_num > 1:
            # BUG FIX: the penalty used to be back-propagated AFTER the critic
            # optimizers had already stepped (and no step followed), so it
            # never influenced the parameters. It is now folded into the loss
            # that the critic optimizers minimize. Identical behavior at eta=0.
            grads = torch.stack(grads, dim=0)  # [N, B, A]
            sim_matrix = torch.einsum('nbi,mbi->nm', grads, grads)  # pairwise cosine sims
            penalty = (sim_matrix.sum() - sim_matrix.trace()) / (self.q_num * (self.q_num - 1))
            critic_loss = critic_loss + self.eta * penalty / self.batch_size

        for q_optim in self.q_optims:
            q_optim.zero_grad()
        critic_loss.backward()
        for q_optim in self.q_optims:
            q_optim.step()

        # --- Delayed actor and temperature updates ---
        if self.total_step % self.policy_update_freq == 0:
            for _ in range(self.policy_update_freq):
                new_action, log_prob = self.actor(s)
                q_vals = torch.stack([q(s, new_action) for q in self.q_models], dim=0)
                min_q = torch.min(q_vals, dim=0)[0]
                actor_loss = (self.alpha * log_prob - min_q).mean()

                self.actor_optim.zero_grad()
                actor_loss.backward()
                self.actor_optim.step()

                # Temperature loss drives policy entropy toward target_entropy.
                # zero_grad() here also clears any alpha grad leaked from the
                # actor loss above, as in the original update order.
                alpha_loss = -(self.log_alpha.exp() * (log_prob + self.target_entropy).detach()).mean()
                self.alpha_optim.zero_grad()
                alpha_loss.backward()
                self.alpha_optim.step()
                self.alpha = self.log_alpha.exp()

        # --- Polyak soft update of the target critics ---
        for target_q, q in zip(self.target_q_models, self.q_models):
            for param, target_param in zip(q.parameters(), target_q.parameters()):
                target_param.data.copy_(
                    self.tau * param.data + (1 - self.tau) * target_param.data
                )

        return critic_loss.item()

    def train(self):
        """Run the online training loop for `num_frames` environment steps."""
        # gymnasium's reset() returns (obs, info); keep only the observation.
        state, _ = self.env.reset()
        score = 0
        i_episode = 0
        frame_count = 0
        for self.total_step in range(1, self.num_frames + 1):
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state

            frame_count += 1
            score += reward

            # Episode finished: log the return and reset.
            if done:
                i_episode += 1
                print(
                    f'frame is {self.total_step}, score is {score}')

                state, _ = self.env.reset()
                score = 0
                frame_count = 0

            # Learn once the warm-up is over and the buffer can fill a batch.
            if (
                len(self.memory) >= self.batch_size
                and self.total_step > self.random_steps
            ):
                self.update_model()

    def test(self, num_episodes: int = 10, memory_size: int = 10000):
        """Roll out a saved policy deterministically and dump the dataset to disk."""
        # Buffer used purely as storage for the collected transitions.
        buffer = ReplayBuffer(self.observation_dim, self.action_dim, size=memory_size)

        # Rebuild the actor and load trained weights.
        # NOTE(review): relies on module-level `args`, `HOME`, `env_name` and a
        # checkpoint with an 'actor_state_dict' key (see save_best_model).
        actor = Actor(state_dim=self.observation_dim, action_dim=self.action_dim, hidden_num=args.width).to(self.device)
        checkpoint = torch.load(f'{HOME}/models/edac-{env_name}-model.pkl', map_location=self.device, weights_only=True)
        actor.load_state_dict(checkpoint['actor_state_dict'])
        actor.eval()

        state, _ = self.env.reset()
        score = 0
        steps = 0
        for i in range(num_episodes):
            done = False
            frame = 0
            while not done:
                # Deterministic action (distribution mean) for evaluation.
                selected_action = actor(
                                torch.FloatTensor(state).to(self.device), is_test=True
                            )[0].detach().cpu().numpy()
                next_state, reward, terminated, truncated, _ = self.env.step(selected_action)
                done = terminated or truncated
                buffer.store(state, selected_action, reward, next_state, done)

                state = next_state
                frame += 1
                steps += 1
                score += reward

            print(f'Episode {i + 1}: Score : {score}')
            state, _ = self.env.reset()
            score = 0

            # Stop once enough transitions were collected.
            if steps >= memory_size:
                break

        # Persist the collected dataset.
        buffer.save(f'{HOME}/models/{env_name}-dataset.pkl')
        print(f"Memory saved")


def save_best_model(path):
    """Serialize the trained actor plus the hyper-parameters needed to rebuild it.

    Reads the module-level `agent`, `args` and `env_name`; tensors are saved
    in whatever device format they currently live in (GPU tensors stay GPU).
    """
    checkpoint = {
        'actor_state_dict': agent.actor.state_dict(),
        'width': args.width,
        'batch_size': args.batch_size,
        'q_num': args.q_num,
        'env_name': env_name,
        'model': 'edac',
    }
    torch.save(obj=checkpoint, f=path)


import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--q_num', type=int, default=2)
parser.add_argument('--batch_size', type=int, default=256)
parser.add_argument('--width', type=int, default=256)
# BUG FIX: the train/test switch was a hard-coded `if True:` that made the
# evaluation branch unreachable; expose it as a CLI flag (default: train).
parser.add_argument('--test', action='store_true',
                    help='evaluate a saved model instead of training')
args = parser.parse_args()

HOME = os.path.dirname(os.path.realpath(__file__))

# Training-run parameters.
num_frames = int(3e6)            # total environment steps
memory_size = int(1e6)           # replay-buffer capacity
initial_random_steps = int(1e4)  # warm-up steps with uniform random actions

env_name = 'Walker2d-v4'
my_env = gym.make(env_name)
# env_name = 'Hopper-v5'
# my_env = gym.make(env_name, ctrl_cost_weight=1e-3,)
observation_dim = my_env.observation_space.shape[0]
action_dim = my_env.action_space.shape[0]

print(f'Observation dimension is {observation_dim}, action dimension is {action_dim}.')

agent = SACN_EDAC(
    obs_dim=observation_dim,
    action_dim=action_dim,
    env=my_env,
    num_frames=num_frames,
    random_steps=initial_random_steps,
    memory_size=memory_size,
    batch_size=args.batch_size,
    q_num=args.q_num,
    hidden_num=args.width,
    eta=0,  # eta=0 disables the EDAC penalty (plain SAC-N)
)

if not args.test:  # train
    agent.train()
    os.makedirs(f'{HOME}/models', exist_ok=True)
    save_best_model(f'{HOME}/models/edac-{env_name}-model.pkl')
else:
    agent.test(num_episodes=10000, memory_size=1000_000)

print('Complete')