import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

# Actor (policy) network
class Actor(nn.Module):
    """Deterministic policy: maps a state to an action squashed into [-1, 1]."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        # Two 64-unit hidden layers followed by a tanh output head.
        self.fc1 = nn.Linear(state_dim, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, action_dim)

    def forward(self, state):
        hidden = torch.relu(self.fc1(state))
        hidden = torch.relu(self.fc2(hidden))
        # tanh bounds each action component to [-1, 1].
        return torch.tanh(self.fc3(hidden))


# Critic (Q-value) network
class Critic(nn.Module):
    """Action-value function: scores a (state, action) pair with one scalar."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        # State and action are concatenated before the first layer.
        self.fc1 = nn.Linear(state_dim + action_dim, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, state, action):
        features = torch.cat([state, action], dim=1)
        for layer in (self.fc1, self.fc2):
            features = torch.relu(layer(features))
        # Unbounded scalar Q-value per batch row.
        return self.fc3(features)


# DDPG agent
class DDPGAgent:
    """Minimal DDPG agent: actor (policy) + critic (Q) trained per transition.

    NOTE(review): this variant has no target networks and no replay buffer —
    the bootstrapped TD target uses the online networks directly, which full
    DDPG stabilizes with slowly-updated target copies. Kept as-is to preserve
    the existing training interface.
    """

    def __init__(self, state_dim, action_dim, gamma=0.99):
        """Build networks and optimizers.

        Args:
            state_dim: dimensionality of the state vector.
            action_dim: dimensionality of the action vector.
            gamma: discount factor for the TD target (was hard-coded 0.99).
        """
        self.actor = Actor(state_dim, action_dim)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-4)
        self.critic = Critic(state_dim, action_dim)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3)
        self.criterion = nn.MSELoss()
        self.gamma = gamma

    def get_action(self, state):
        """Return the policy's action for a single state as a numpy array."""
        state = torch.as_tensor(state, dtype=torch.float32)
        with torch.no_grad():
            action = self.actor(state)
        return action.numpy()

    def train(self, state, action, reward, next_state, done):
        """One gradient step on critic then actor from a single transition.

        Args:
            state, next_state: 1-D state vectors.
            action: 1-D action vector.
            reward: scalar reward.
            done: truthy when the episode terminated at this transition.
        """
        # BUGFIX: add a batch dimension. Critic.forward concatenates along
        # dim=1, which raises on the 1-D tensors a single transition yields.
        state = torch.as_tensor(state, dtype=torch.float32).unsqueeze(0)
        action = torch.as_tensor(action, dtype=torch.float32).unsqueeze(0)
        next_state = torch.as_tensor(next_state, dtype=torch.float32).unsqueeze(0)
        # Shape (1, 1) so it matches the critic output without broadcasting.
        reward = torch.tensor([[float(reward)]], dtype=torch.float32)
        not_done = 1.0 - float(done)

        # Critic update: regress Q(s, a) toward the TD target; no_grad keeps
        # the target out of the graph (replaces the per-tensor .detach()).
        with torch.no_grad():
            target_action = self.actor(next_state)
            target_value = self.critic(next_state, target_action)
            target_q = reward + not_done * self.gamma * target_value
        predicted_q = self.critic(state, action)
        critic_loss = self.criterion(predicted_q, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Actor update: ascend the critic's value of the policy's own action.
        predicted_action = self.actor(state)
        actor_loss = -self.critic(state, predicted_action).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()


class CustomEnvironment:
    """Toy gym-style environment with random placeholder dynamics.

    States and rewards are drawn uniformly at random; an episode ends after
    `max_steps` steps regardless of the actions taken.
    """

    def __init__(self):
        # Environment parameters.
        self.state_dim = 4    # state vector has 4 components (example)
        self.action_dim = 2   # action vector has 2 components (example)
        self.max_steps = 100  # hard episode-length cap
        self.current_step = 0

    def reset(self):
        """Start a new episode and return a random initial state."""
        self.current_step = 0
        return np.random.rand(self.state_dim)  # random state as a placeholder

    def step(self, action):
        """Advance one step; return (next_state, reward, done, info)."""
        self.current_step += 1
        done = self.current_step >= self.max_steps
        reward = np.random.rand()  # random reward as a placeholder
        next_state = np.random.rand(self.state_dim)  # random state as a placeholder
        return next_state, reward, done, {}

    def render(self):
        """Optional visualization hook; intentionally a no-op."""
        pass


# Initialize the environment and DDPG agent.
# BUGFIX(review): the original referenced undefined names (`env`,
# `num_episodes`) and hard-coded state_dim=6 / action_dim=3, which disagree
# with CustomEnvironment's 4-dim states and 2-dim actions (the networks would
# reject the env's vectors). The dimensions are now taken from the env itself.
env = CustomEnvironment()
state_dim = env.state_dim
action_dim = env.action_dim
agent = DDPGAgent(state_dim, action_dim)
num_episodes = 10  # was undefined; modest default for this toy env

# Training loop: one gradient step per transition (no replay buffer).
for episode in range(num_episodes):
    state = env.reset()
    done = False
    total_reward = 0.0

    while not done:
        action = agent.get_action(state)
        next_state, reward, done, _ = env.step(action)
        agent.train(state, action, reward, next_state, done)
        state = next_state
        total_reward += reward
