import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal
from torch.utils.tensorboard import SummaryWriter
from env import RNNCockroachEnv, get_rnn_env
from agent_history import try_get_agent, ActorCritic_v3
from ppo_utils import PPOMemory, get_save_dir

# Run on GPU when available; every tensor/model in this module is moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class PPOAgent:
    """PPO agent (clipped surrogate objective + GAE) for a continuous 2-D
    action space.

    The policy network maps a 2-D observation (dx, dy) to a Normal
    distribution over 2 actions (left/right frequencies, clipped to
    [10, 60] by the caller). Training runs minibatch updates for several
    epochs per rollout, with gradient clipping, advantage normalization,
    TensorBoard logging, and early stopping on the 100-episode average
    score.
    """

    def __init__(
            self, 
            env,
            model_class,
            hidden_dim=32,
            lr=0.0003,
            gamma=0.99,
            gae_lambda=0.95,
            policy_clip=0.2,
            batch_size=32,
            n_epochs=10,
            entropy_coef=0.01,
            value_coef=0.5,
            max_grad_norm=0.5,
            normalize_advantage=True,
            patience=100,
            min_delta=0.05
        ):
        """Build the actor-critic, optimizer, rollout memory and logger.

        Args:
            env: environment exposing reset()/step() (gymnasium-style 5-tuple
                step return) and optionally render().
            model_class: callable (input_dim, n_actions, hidden_dim) -> module
                whose forward returns (mean, std, value) and which provides
                get_action(state) -> (action, log_prob, entropy, value).
            hidden_dim: hidden layer width of the actor-critic.
            lr: Adam learning rate.
            gamma: discount factor.
            gae_lambda: GAE lambda (bias/variance trade-off).
            policy_clip: PPO clipping epsilon.
            batch_size: minibatch size for updates (also the learn trigger).
            n_epochs: optimization epochs per rollout.
            entropy_coef: weight of the entropy bonus (exploration).
            value_coef: weight of the critic loss.
            max_grad_norm: global gradient-norm clip.
            normalize_advantage: standardize advantages before the update.
            patience: episodes without improvement before early stop.
            min_delta: minimum avg-score improvement that resets patience.
        """
        self.env = env
        self.gamma = gamma
        self.policy_clip = policy_clip
        self.n_epochs = n_epochs
        self.gae_lambda = gae_lambda
        self.entropy_coef = entropy_coef
        self.value_coef = value_coef
        self.max_grad_norm = max_grad_norm
        self.normalize_advantage = normalize_advantage
        self.patience = patience
        self.min_delta = min_delta

        # Observation/action sizes are fixed by the task.
        self.input_dim = 2  # (dx, dy)
        self.n_actions = 2  # (left_freq, right_freq)

        self.actor_critic = model_class(self.input_dim, self.n_actions, hidden_dim).to(device)
        self.optimizer = optim.Adam(self.actor_critic.parameters(), lr=lr)
        self.memory = PPOMemory(batch_size)
        self.total_steps = 0

        # Run directory + TensorBoard writer.
        self.save_dir = get_save_dir()
        self.writer = SummaryWriter(os.path.join(self.save_dir, 'logs'))

        # Early-stopping state.
        self.best_avg_score = -float('inf')
        self.no_improve_episodes = 0
        self.early_stop = False

    def store_transition(self, state, action, prob, val, reward, done):
        """Append one transition to the rollout memory."""
        self.memory.store(state, action, prob, val, reward, done)

    def _compute_gae(self, rewards, values, dones):
        """Return per-step GAE advantages as a float32 numpy array.

        BUGFIX vs the original implementation: the O(n^2) forward double
        loop is replaced by the standard O(n) backward recursion, and the
        running accumulator is reset at episode boundaries — previously
        advantages leaked across `done` transitions because the
        gamma*lambda discount was never masked.

        The final transition keeps advantage 0 (no bootstrap value for its
        successor state is available), matching the original behavior.
        """
        n = len(rewards)
        advantage = np.zeros(n, dtype=np.float32)
        gae = 0.0
        for t in reversed(range(n - 1)):
            not_done = 1 - dones[t]
            # TD residual: r_t + gamma*V(s_{t+1})*(1-done) - V(s_t)
            delta = rewards[t] + self.gamma * values[t + 1] * not_done - values[t]
            gae = delta + self.gamma * self.gae_lambda * not_done * gae
            advantage[t] = gae
        return advantage

    def learn(self):
        """Run one PPO update pass over everything in memory, then clear it."""
        states, actions, old_probs, vals, rewards, dones, batches = self.memory.get_batches()

        advantage = torch.tensor(self._compute_gae(rewards, vals, dones),
                                 dtype=torch.float32).to(device)
        values = torch.tensor(vals, dtype=torch.float32).to(device)

        # BUGFIX: build the critic regression target from the *raw*
        # advantages; the original added the normalized advantage to the old
        # values, which distorted the value target's scale.
        returns_all = advantage + values

        # Standardizing advantages stabilizes the policy-gradient scale.
        if self.normalize_advantage and len(advantage) > 1:
            advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-8)

        for _ in range(self.n_epochs):
            for batch in batches:
                states_batch = torch.tensor(states[batch], dtype=torch.float32).to(device)
                old_probs_batch = torch.tensor(old_probs[batch], dtype=torch.float32).to(device)
                actions_batch = torch.tensor(actions[batch], dtype=torch.float32).to(device)

                # Re-evaluate the stored actions under the current policy.
                mean, std, critic_value = self.actor_critic(states_batch)
                # BUGFIX: squeeze only the trailing value dim; a bare
                # .squeeze() would also collapse a batch of size 1 to 0-d.
                critic_value = critic_value.squeeze(-1)

                dist = Normal(mean, std)
                new_probs = dist.log_prob(actions_batch).sum(dim=-1)
                entropy = dist.entropy().mean()

                # Clipped surrogate (PPO) actor loss.
                prob_ratio = (new_probs - old_probs_batch).exp()
                weighted_probs = advantage[batch] * prob_ratio
                clipped_probs = torch.clamp(prob_ratio, 1 - self.policy_clip, 1 + self.policy_clip) * advantage[batch]
                actor_loss = -torch.min(weighted_probs, clipped_probs).mean()

                # Critic loss against the (raw-advantage) return estimate.
                critic_loss = nn.functional.mse_loss(critic_value, returns_all[batch])

                # Entropy bonus is subtracted: maximizing entropy -> exploration.
                total_loss = actor_loss + self.value_coef * critic_loss - self.entropy_coef * entropy

                self.optimizer.zero_grad()
                total_loss.backward()
                nn.utils.clip_grad_norm_(self.actor_critic.parameters(), self.max_grad_norm)
                self.optimizer.step()

                # NOTE: total_steps is not advanced inside this loop, so all
                # minibatch losses of one learn() call log at the same x value.
                self.writer.add_scalar('Loss/actor', actor_loss.item(), self.total_steps)
                self.writer.add_scalar('Loss/critic', critic_loss.item(), self.total_steps)
                self.writer.add_scalar('Loss/entropy', entropy.item(), self.total_steps)

        self.memory.clear()

    def save_model(self, filename):
        """Save the actor-critic state dict under the run directory."""
        save_path = os.path.join(self.save_dir, filename)
        torch.save(self.actor_critic.state_dict(), save_path)
        print(f"模型已保存到: {save_path}")

    def load_model(self, path):
        """Replace the actor-critic with a model reconstructed from `path`.

        Uses try_get_agent (which infers the architecture from the
        checkpoint) instead of load_state_dict, so the current network's
        hidden_dim does not have to match the file.
        """
        self.actor_critic = try_get_agent(path).to(device)

    def train(self, n_episodes, max_steps_per_episode=1000, max_explore_steps=1000):
        """Train for up to n_episodes with an initial random-exploration phase.

        Saves 'best_model.pth' on improvement, periodic checkpoints every
        100 episodes, 'solved_model.pth' when the 100-episode average
        reaches 45, and 'final_model.pth' at the end.
        """
        scores = []

        # ---- free-exploration phase: random actions seed the memory ----
        print(f"开始自由探索阶段 {max_explore_steps} 步")
        observation = self.env.reset()
        done = False
        explore_steps = 0

        while explore_steps < max_explore_steps and not done:
            # Uniform random action within the valid frequency range.
            action = np.random.uniform(10, 60, size=2)
            next_observation, reward, terminated, truncated, info = self.env.step(action)
            done = terminated or truncated

            # NOTE(review): assumes observation[0] is the (dx, dy) state both
            # after reset() and after step() — confirm against the env API.
            state = np.array(observation[0], dtype=np.float32)

            # BUGFIX: the original stored prob=0 and val=0 here, which made
            # the first PPO updates use a bogus importance ratio
            # exp(new_logprob - 0) and a zero value baseline. Evaluate the
            # random action under the current policy so the stored
            # transition is coherent.
            with torch.no_grad():
                obs_t = torch.tensor(state, dtype=torch.float32).unsqueeze(0).to(device)
                act_t = torch.tensor(action, dtype=torch.float32).unsqueeze(0).to(device)
                mean, std, value = self.actor_critic(obs_t)
                prob = Normal(mean, std).log_prob(act_t).sum().item()
                val = value.squeeze().item()
            self.store_transition(state, action, prob, val, reward, done)

            observation = next_observation
            explore_steps += 1

        print("自由探索完成，开始正式训练...")

        for episode in range(n_episodes):
            if self.early_stop:
                print(f"触发早停，在第{episode}回合停止训练")
                break

            observation = self.env.reset()
            done = False
            score = 0
            steps = 0

            while not done and steps < max_steps_per_episode:
                # Sample an action from the current policy.
                state = np.array(observation[0], dtype=np.float32)
                action, prob, entropy, val = self.actor_critic.get_action(state)
                # Clamp to the valid frequency range [10, 60].
                action = np.clip(action, 10, 60)

                next_observation, reward, terminated, truncated, info = self.env.step(action)
                done = terminated or truncated

                self.store_transition(state, action, prob, val, reward, done)
                score += reward
                steps += 1
                self.total_steps += 1

                # Update once a full batch accumulated (or the episode ended).
                if steps % self.memory.batch_size == 0 or done:
                    if len(self.memory.states) >= self.memory.batch_size:
                        self.learn()

                observation = next_observation

            scores.append(score)
            # Moving average over the last (up to) 100 episodes.
            recent_scores = scores[-100:] if len(scores) >= 100 else scores
            avg_score = np.mean(recent_scores)

            self.writer.add_scalar('Reward/episode', score, episode)
            self.writer.add_scalar('Reward/average', avg_score, episode)

            # Early-stopping bookkeeping: only improvements larger than
            # min_delta reset the patience counter.
            if avg_score > self.best_avg_score + self.min_delta:
                self.best_avg_score = avg_score
                self.no_improve_episodes = 0
                self.save_model('best_model.pth')
                print(f"发现更好的模型，平均得分: {avg_score:.2f}")
            else:
                self.no_improve_episodes += 1
                if self.no_improve_episodes >= self.patience:
                    self.early_stop = True

            if (episode + 1) % 100 == 0:
                self.save_model(f'model_ep{episode+1}.pth')

            print(f'Episode: {episode} | Score: {score:.2f} | Avg Score: {avg_score:.2f} | Steps: {steps} | No Improve: {self.no_improve_episodes}')

            # Considered solved once a full 100-episode window averages >= 45.
            if avg_score >= 45 and len(recent_scores) >= 100:
                print(f"环境在{episode}回合后被认为已解决，平均得分: {avg_score:.2f}")
                self.save_model('solved_model.pth')
                break

        self.save_model('final_model.pth')
        print("训练完成，已保存最终模型")

    def evaluate(self, episodes=10, render=True, max_steps=None):
        """Roll out the current policy and return the mean episode reward.

        Args:
            episodes: number of evaluation episodes.
            render: call env.render() each step.
            max_steps: optional per-episode step cap; None (the default,
                matching the original behavior) means no cap — a safeguard
                for environments that may never terminate.
        """
        total_reward = 0

        for episode in range(episodes):
            observation = self.env.reset()
            done = False
            ep_reward = 0
            steps = 0

            while not done and (max_steps is None or steps < max_steps):
                if render:
                    self.env.render()

                # NOTE(review): same observation[0] assumption as in train().
                state = np.array(observation[0], dtype=np.float32)
                action, _, _, _ = self.actor_critic.get_action(state)
                action = np.clip(action, 10, 60)
                observation, reward, terminated, truncated, _ = self.env.step(action)
                done = terminated or truncated
                ep_reward += reward
                steps += 1

            total_reward += ep_reward
            print(f'Episode {episode}: 得分 = {ep_reward:.2f}, 步数 = {steps}')

        avg_reward = total_reward / episodes
        print(f'在{episodes}回合的平均得分: {avg_reward:.2f}')
        return avg_reward

if __name__ == '__main__':
    # ---- training phase ----
    train_env = get_rnn_env()
    trainer = PPOAgent(
        train_env,
        ActorCritic_v3,
        hidden_dim=32,          # keep the network small
        lr=0.0003,
        gamma=0.99,
        gae_lambda=0.95,
        policy_clip=0.2,
        batch_size=32,
        n_epochs=10,
        entropy_coef=0.01,      # encourage exploration
        value_coef=0.5,
        max_grad_norm=0.5,
        normalize_advantage=True,
        patience=100,           # generous early-stop patience
        min_delta=0.05,         # small improvement threshold
    )
    trainer.train(
        n_episodes=300,
        max_steps_per_episode=200,
        max_explore_steps=5000,  # shortened exploration phase
    )
    train_env.close()

    # ---- evaluation phase: reload a saved checkpoint and watch it play ----
    eval_env = get_rnn_env(render_mode='human')
    checkpoint = r"E:\25spring\FYP\pymodules\deep-models\strategy\ppo-202505272015\best_ppo_cockroach.pth"
    # Recover the hidden size from the saved actor weight matrix.
    ckpt_hidden = torch.load(checkpoint)['actor.weight'].shape[1]
    evaluator = PPOAgent(env=eval_env, model_class=ActorCritic_v3, hidden_dim=ckpt_hidden)
    evaluator.load_model(checkpoint)
    evaluator.evaluate(episodes=5)
    eval_env.close()
