import torch
from torch import nn
import numpy as np
from torch.distributions import Normal

# Global compute device; get_action() moves input states here before forward().
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Size of the observation vector (in_features of every model's first Linear).
INPUT_DIM = 2
# Number of continuous action dimensions; actor heads emit NACTIONS * 2
# values (mean and log-std per dimension).
NACTIONS = 2

def try_get_agent(path):
    """Load a saved agent from ``path``.

    The checkpoint may be either a fully pickled ``nn.Module`` or a bare
    state_dict from one of the ActorCritic_v1/v2/v3 architectures defined
    below; the architecture is inferred by probing state_dict keys/shapes.

    Returns the loaded model, or ``None`` if no architecture matched.
    """
    try:
        # torch >= 2.6 defaults to weights_only=True, which rejects pickled
        # nn.Module objects; retry with the permissive loader on failure.
        pth = torch.load(path)
    except Exception:
        # NOTE: weights_only=False unpickles arbitrary objects — only safe
        # for checkpoints from a trusted source.
        pth = torch.load(path, weights_only=False)
    if isinstance(pth, nn.Module):
        return pth

    # v1 wraps its actor head in nn.Sequential, so the key is 'actor.0.weight'
    # with shape [n_actions*2, hidden_dim] — infer hidden_dim from shape[1].
    try:
        hidden_dim = pth['actor.0.weight'].shape[1]
        model = ActorCritic_v1(hidden_dim=hidden_dim)
        model.load_state_dict(pth)
        return model
    except Exception:
        pass
    # v2 uses a bare nn.Linear actor head -> key 'actor.weight'.
    try:
        hidden_dim = pth['actor.weight'].shape[1]
        # Pass the inferred width: previously it was computed but dropped,
        # so non-default hidden sizes failed to load.
        model = ActorCritic_v2(hidden_dim=hidden_dim)
        model.load_state_dict(pth)
        return model
    except Exception:
        pass
    # v3: infer the width from the first feature layer's out_features.
    try:
        hidden_dim = pth['features.0.weight'].shape[0]
        model = ActorCritic_v3(hidden_dim=hidden_dim)
        model.load_state_dict(pth)
        return model
    except Exception:
        pass
    print(f"无法加载模型: {path}")
    return None

class ActorCritic_v1(nn.Module):
    """Compact actor-critic: a shared two-layer MLP trunk with linear heads.

    The actor head emits ``n_actions * 2`` values, interpreted as the mean
    and log-std of a diagonal Gaussian policy; the critic head emits a
    scalar state value.
    """

    def __init__(self, input_dim=INPUT_DIM, n_actions=NACTIONS, hidden_dim=128):
        super(ActorCritic_v1, self).__init__()

        # Shared feature trunk.
        self.features = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU()
        )

        # Policy head: concatenated [mean | log-std].
        self.actor = nn.Sequential(
            nn.Linear(hidden_dim, n_actions * 2)
        )

        # State-value head.
        self.critic = nn.Sequential(
            nn.Linear(hidden_dim, 1)
        )

        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Orthogonal init with gain sqrt(2) — the common choice for ReLU nets.
        if not isinstance(module, nn.Linear):
            return
        nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
        if module.bias is not None:
            module.bias.data.zero_()

    def forward(self, x):
        """Return (mean, std, value) for observations ``x``."""
        h = self.features(x)
        value = self.critic(h)
        raw = self.actor(h)
        # Split head output into mean and log-std, then clamp before
        # exponentiating so std stays within [e^-20, e^2].
        mean, log_std = torch.chunk(raw, 2, dim=-1)
        std = torch.exp(torch.clamp(log_std, -20, 2))
        return mean, std, value

    def get_action(self, state, action=None):
        """Sample (or evaluate, if ``action`` is given) a policy action.

        Returns (action ndarray, log_prob, entropy, value).
        """
        arr = state if isinstance(state, np.ndarray) else np.array(state)
        obs = torch.FloatTensor(arr).to(device)
        mean, std, value = self.forward(obs)
        dist = Normal(mean, std)
        if action is None:
            action = dist.sample()
        return (
            action.cpu().numpy(),
            dist.log_prob(action).sum(dim=-1),
            dist.entropy().mean(),
            value,
        )

class ActorCritic_v2(nn.Module):
    """Minimal actor-critic: one hidden layer with bare linear heads.

    Same contract as ActorCritic_v1: the actor emits ``n_actions * 2``
    values ([mean | log-std] of a diagonal Gaussian), the critic a scalar.
    """

    def __init__(self, input_dim=INPUT_DIM, n_actions=NACTIONS, hidden_dim=64):
        super(ActorCritic_v2, self).__init__()

        # Single-layer shared trunk.
        self.features = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU()
        )

        # Bare linear heads (no Sequential wrapper).
        self.actor = nn.Linear(hidden_dim, n_actions * 2)
        self.critic = nn.Linear(hidden_dim, 1)

        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Tiny xavier gain keeps initial policy outputs close to zero.
        if not isinstance(module, nn.Linear):
            return
        nn.init.xavier_uniform_(module.weight, gain=0.01)
        if module.bias is not None:
            module.bias.data.zero_()

    def forward(self, x):
        """Return (mean, std, value) for observations ``x``."""
        h = self.features(x)
        raw = self.actor(h)
        value = self.critic(h)
        # Clamp log-std to [-20, 2] before exponentiating for a positive std.
        mean, log_std = torch.chunk(raw, 2, dim=-1)
        std = torch.exp(torch.clamp(log_std, -20, 2))
        return mean, std, value

    def get_action(self, state, action=None):
        """Sample (or evaluate, if ``action`` is given) a policy action.

        Returns (action ndarray, log_prob, entropy, value).
        """
        arr = state if isinstance(state, np.ndarray) else np.array(state)
        obs = torch.FloatTensor(arr).to(device)
        mean, std, value = self.forward(obs)
        dist = Normal(mean, std)
        if action is None:
            action = dist.sample()
        log_prob = dist.log_prob(action).sum(dim=-1)
        entropy = dist.entropy().mean()
        return action.cpu().numpy(), log_prob, entropy, value

class ActorCritic_v3(nn.Module):
    """Larger actor-critic: dropout-regularized trunk with a gating layer.

    A sigmoid "attention" branch produces per-feature gates that scale the
    trunk output element-wise before the actor/critic heads. The actor emits
    ``n_actions * 2`` values ([mean | log-std] of a diagonal Gaussian).
    """

    def __init__(self, input_dim=INPUT_DIM, n_actions=NACTIONS, hidden_dim=256):
        super(ActorCritic_v3, self).__init__()

        # Two-layer trunk with dropout after each activation.
        self.features = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1)
        )

        # Bottlenecked gating branch; sigmoid output in (0, 1) per feature.
        self.attention = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 4),
            nn.ReLU(),
            nn.Linear(hidden_dim // 4, hidden_dim),
            nn.Sigmoid()
        )

        # Policy head.
        self.actor = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, n_actions * 2)
        )

        # Value head.
        self.critic = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Linear(hidden_dim // 2, 1)
        )

        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Orthogonal init with gain sqrt(2) — the common choice for ReLU nets.
        if not isinstance(module, nn.Linear):
            return
        nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
        if module.bias is not None:
            module.bias.data.zero_()

    def forward(self, x):
        """Return (mean, std, value) for observations ``x``."""
        h = self.features(x)

        # Gate the trunk features element-wise.
        gates = self.attention(h)
        h = h * gates

        raw = self.actor(h)
        value = self.critic(h)

        # Clamp log-std to [-20, 2] before exponentiating for a positive std.
        mean, log_std = torch.chunk(raw, 2, dim=-1)
        std = torch.exp(torch.clamp(log_std, -20, 2))
        return mean, std, value

    def get_action(self, state, action=None):
        """Sample (or evaluate, if ``action`` is given) a policy action.

        Returns (action ndarray, log_prob, entropy, value).
        """
        arr = state if isinstance(state, np.ndarray) else np.array(state)
        obs = torch.FloatTensor(arr).to(device)
        mean, std, value = self.forward(obs)
        dist = Normal(mean, std)
        if action is None:
            action = dist.sample()
        return (
            action.cpu().numpy(),
            dist.log_prob(action).sum(dim=-1),
            dist.entropy().mean(),
            value,
        )

if __name__ == '__main__':
    # Smoke test: try loading a known checkpoint and print the result
    # (the model on success, None on failure). NOTE: machine-specific path.
    checkpoint_path = r"E:\25spring\FYP\pymodules\deep-models\strategy\ppo-202505272015\best_ppo_cockroach.pth"
    print(try_get_agent(checkpoint_path))