# -*- coding:utf-8 -*-
# @FileName :test.py
# @Time :2024/6/15 上午10:42
# @Author :ShengYe
# @Des : DDPG Agent 在 Carla 仿真环境中的实现

import math
import random
from collections import deque

import carla
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

from my_controller.controller import Longitudinal_PID_controller  # 纵向 PID 控制器
from my_code.my_utils import generate_dlc_path  # 生成路径
from planner import planning_utils  # 路径规划工具


# 定义 Actor 网络
# Actor (policy) network
class Actor(nn.Module):
    """Deterministic policy: maps a state to a bounded continuous action.

    The raw network output is squashed with tanh and scaled by
    ``action_range``, so every action lies in [-action_range, action_range].
    """

    def __init__(self, state_dim, action_dim, action_range):
        super(Actor, self).__init__()
        self.action_range = action_range
        # Two hidden layers of 64 units, then a linear head of action size.
        self.fc1 = nn.Linear(state_dim, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, action_dim)

    def forward(self, state):
        hidden = torch.relu(self.fc1(state))
        hidden = torch.relu(self.fc2(hidden))
        raw = self.fc3(hidden)
        # tanh bounds the output to (-1, 1); scaling maps it to the action range.
        return self.action_range * torch.tanh(raw)


# 定义 Critic 网络
# Critic (Q-value) network
class Critic(nn.Module):
    """State-action value function Q(s, a) -> scalar.

    The state and action are concatenated and passed through two hidden
    layers of 64 units, producing a single Q-value per sample.
    """

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(state_dim + action_dim, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, state, action):
        q = torch.cat((state, action), dim=1)
        q = torch.relu(self.fc1(q))
        q = torch.relu(self.fc2(q))
        return self.fc3(q)


# DDPG Agent
# DDPG Agent: actor-critic with target networks and Polyak (soft) updates.
class DDPGAgent:
    def __init__(self, state_dim, action_dim, action_range=1.0, actor_lr=1e-3, critic_lr=1e-3, gamma=0.99, tau=1e-2):
        """Build the actor/critic pairs and their optimizers.

        :param state_dim: dimensionality of the state vector
        :param action_dim: dimensionality of the action vector
        :param action_range: scalar bound of the action space; actions lie in
            [-action_range, action_range]. A legacy (low, high) pair is also
            accepted and collapsed to its magnitude.
        :param actor_lr / critic_lr: Adam learning rates
        :param gamma: discount factor
        :param tau: soft-update interpolation coefficient
        """
        # BUGFIX: the old default was the tuple (-1., 1.), which crashes in
        # Actor.forward (`tanh(...) * tuple`). Accept a tuple for backward
        # compatibility but reduce it to the single scalar the Actor expects.
        if isinstance(action_range, (tuple, list)):
            action_range = max(abs(action_range[0]), abs(action_range[1]))

        self.actor = Actor(state_dim, action_dim, action_range)
        self.actor_target = Actor(state_dim, action_dim, action_range)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=actor_lr)

        self.critic = Critic(state_dim, action_dim)
        self.critic_target = Critic(state_dim, action_dim)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=critic_lr)

        # Targets start as exact copies of the online networks.
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.critic_target.load_state_dict(self.critic.state_dict())

        self.gamma = gamma
        self.tau = tau

    def select_action(self, state):
        """Return the deterministic policy action as a flat numpy array."""
        state = torch.FloatTensor(state).unsqueeze(0)
        # Inference only: no_grad avoids building an autograd graph.
        with torch.no_grad():
            action = self.actor(state)
        return action.numpy().flatten()

    def train(self, replay_buffer, batch_size=64):
        """Run one DDPG optimization step on a sampled minibatch.

        :param replay_buffer: object exposing sample(batch_size) ->
            (states, actions, next_states, rewards, dones)
        :param batch_size: number of transitions per update
        """
        state_batch, action_batch, next_state_batch, reward_batch, done_batch = replay_buffer.sample(batch_size)

        # Converting through a single ndarray avoids the slow per-element
        # tensor construction from a tuple of arrays.
        state_batch = torch.as_tensor(np.asarray(state_batch), dtype=torch.float32)
        action_batch = torch.as_tensor(np.asarray(action_batch), dtype=torch.float32)
        next_state_batch = torch.as_tensor(np.asarray(next_state_batch), dtype=torch.float32)
        reward_batch = torch.as_tensor(np.asarray(reward_batch), dtype=torch.float32).unsqueeze(1)
        done_batch = torch.as_tensor(np.asarray(done_batch), dtype=torch.float32).unsqueeze(1)

        # ---- Critic update: minimize TD error against the target networks ----
        Q = self.critic(state_batch, action_batch)
        next_actions = self.actor_target(next_state_batch)
        Q_next = self.critic_target(next_state_batch, next_actions.detach())
        # (1 - done) zeroes the bootstrap term at episode boundaries.
        target_Q = reward_batch + (1 - done_batch) * self.gamma * Q_next
        critic_loss = F.mse_loss(Q, target_Q.detach())

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # ---- Actor update: ascend the critic's value of the policy action ----
        actor_loss = -self.critic(state_batch, self.actor(state_batch)).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # ---- Soft-update the target networks toward the online networks ----
        self.soft_update(self.actor_target, self.actor, self.tau)
        self.soft_update(self.critic_target, self.critic, self.tau)

    @staticmethod
    def soft_update(target_net, eval_net, tau):
        """Polyak averaging: target <- tau * eval + (1 - tau) * target."""
        for target_param, param in zip(target_net.parameters(), eval_net.parameters()):
            target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)


# Carla 环境设置
class CarlaEnv:
    def __init__(self):
        self.actor_list = []
        self.client = carla.Client('localhost', 2000)
        self.client.set_timeout(10.0)
        self.world = self.client.load_world('town05')
        self.amap = self.world.get_map()
        self.ego_spawn_point = self.amap.get_spawn_points()[7]
        self.ego_vehicle = self.spawn_ego_vehicle()

        self._vehicle = self.ego_vehicle
        self._max_throttle = 0.75
        self._max_brake = 0.3
        self._max_steer = 1
        self.min_steer = -1
        self.Lon_control = Longitudinal_PID_controller(self.ego_vehicle)
        self.get_pathway()
        self.cal_vehicle_info()

    def get_pathway(self):
        waypoints = generate_dlc_path.get_waypoints_2(self.ego_spawn_point)
        self._target_path = planning_utils.waypoint_list_2_target_path(waypoints)

    def spawn_ego_vehicle(self):
        model3_bp = self.world.get_blueprint_library().find('vehicle.tesla.model3')
        model3_bp.set_attribute('color', '255,88,0')
        model3_actor = self.world.spawn_actor(model3_bp, self.ego_spawn_point)

        print("【车辆已经生成】transform为", self.ego_spawn_point)
        physics_control = carla.VehiclePhysicsControl()
        physics_control.mass = 1412
        model3_actor.apply_physics_control(physics_control)
        self.actor_list.append(model3_actor)
        return model3_actor


    def reset(self):
        # 销毁当前所有车辆并重新生成ego_vehicle
        self.destroy()
        self.ego_vehicle = self.spawn_ego_vehicle()
        self.actor_list = [self.ego_vehicle]
        self.get_pathway()
        self.cal_vehicle_info()
        return self.get_state()

    def get_state(self):
        self.cal_vehicle_info()
        return self._vehicle_state

    def step(self, action, target_speed):
        control = self.get_vehicle_control(action, target_speed)
        self.ego_vehicle.apply_control(control)
        self.world.tick()
        self.set_spectator()
        return self.get_state(), self.get_reward(), self.get_terminal(), {"waypoints": self._target_path}

    def get_vehicle_control(self, action, target_speed):
        control = carla.VehicleControl()
        control.steer = max(self.min_steer, min(self._max_steer, action))
        control.throttle = min(self._max_throttle, max(0, self.Lon_control.PID_control(target_speed)))
        control.brake = max(0, -self.Lon_control.PID_control(target_speed))
        control.hand_brake = False
        control.manual_gear_shift = False
        control.gear = 1
        return control

    def get_reward(self):
        self.cal_vehicle_info()
        return -self.e_rr

    def destroy(self):
        # 销毁所有的actor
        for actor in self.actor_list:
            actor.destroy()
        self.actor_list = []

    def cal_error_k_fun(self, ts=0.01):
        """
        计算预测点和规划点的误差
        :param ts: 控制周期
        """
        x, y, fi, V_y, fi_dao = self._vehicle_state
        V_x = self._vehicle_Vx
        x += V_x * ts * math.cos(fi) - V_y * ts * math.sin(fi)
        y += V_y * ts * math.cos(fi) + V_x * ts * math.sin(fi)
        fi += fi_dao * ts
        self.x_pre, self.y_pre = x, y

        min_index = self.get_min_index(x, y)
        self.min_index = min_index

        tor_v = np.array([math.cos(self._target_path[min_index][2]), math.sin(self._target_path[min_index][2])])
        n_v = np.array([-math.sin(self._target_path[min_index][2]), math.cos(self._target_path[min_index][2])])
        d_v = np.array([x - self._target_path[min_index][0], y - self._target_path[min_index][1]])
        e_d = np.dot(n_v, d_v)
        e_s = np.dot(tor_v, d_v)

        self.x_pro, self.y_pro = np.array([self._target_path[min_index][0], self._target_path[min_index][1]]) + e_s * tor_v
        theta_r = self._target_path[min_index][2] + self._target_path[min_index][3] * e_s
        e_d_dao = V_y * math.cos(fi - theta_r) + V_x * math.sin(fi - theta_r)
        e_fi = math.sin(fi - theta_r)
        S_dao = (V_x * math.cos(fi - theta_r) - V_y * math.sin(fi - theta_r)) / (1 - self._target_path[min_index][3] * e_d)
        e_fi_dao = fi_dao - self._target_path[min_index][3] * S_dao
        self.k_r = self._target_path[min_index][3]
        self.e_rr = (e_d, e_d_dao, e_fi, e_fi_dao)

    def get_min_index(self, x, y):
        min_d = float('inf')
        min_index = self.min_index
        for i in range(self.min_index, min(self.min_index + 50, len(self._target_path))):
            d = (self._target_path[i][0] - x) ** 2 + (self._target_path[i][1] - y) ** 2
            if d < min_d:
                min_d = d
                min_index = i
        return min_index

    def cal_vehicle_info(self):
        vehicle_loc = self._vehicle.get_location()
        x, y = vehicle_loc.x, vehicle_loc.y
        fi = self._vehicle.get_transform().rotation.yaw * (math.pi / 180)
        V = self._vehicle.get_velocity()
        v_vec = [V.x, V.y, V.z]
        forward_vec = self._vehicle.get_transform().get_forward_vector()
        Vx = max(np.dot(v_vec, [forward_vec.x, forward_vec.y, forward_vec.z]), 0.005)
        right_vec = self._vehicle.get_transform().get_right_vector()
        Vy = np.dot(v_vec, [right_vec.x, right_vec.y, right_vec.z])
        fi_dao = self._vehicle.get_angular_velocity().z * (math.pi / 180)
        self._vehicle_state = (x, y, fi, Vy, fi_dao)
        self._vehicle_Vx = Vx

    def get_terminal(self):
        return self.get_distance() < 2

    def get_distance(self):
        location = self.ego_vehicle.get_transform().location
        return location.distance(self._target_path[-1][0].transform.location)

    def set_spectator(self):
        self.world.get_spectator().set_transform(
            carla.Transform(self.ego_vehicle.get_transform().location +
                            carla.Location(z=50),
                            carla.Rotation(pitch=-90))
        )


class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions for off-policy training.

    Once full, appending a new transition silently evicts the oldest one.
    """

    def __init__(self, buffer_size):
        # deque with maxlen gives O(1) append plus automatic eviction.
        self.buffer = deque(maxlen=buffer_size)

    def add(self, state, action, next_state, reward, done):
        """Store one (s, a, s', r, done) transition."""
        transition = (state, action, next_state, reward, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Draw a uniform random minibatch and return it as per-field tuples."""
        picked = random.sample(self.buffer, batch_size)
        states, actions, next_states, rewards, dones = zip(*picked)
        return states, actions, next_states, rewards, dones

    def __len__(self):
        return len(self.buffer)

    def clear(self):
        """Drop every stored transition."""
        self.buffer.clear()


if __name__ == '__main__':
    env = CarlaEnv()

    # Problem dimensions: the state comes from the environment; the single
    # action is the lateral steering command, scaled to [-0.3, 0.3].
    state_dim = len(env.get_state())
    action_dim = 1
    action_range = 0.3

    agent = DDPGAgent(state_dim, action_dim, action_range)
    replay_buffer = ReplayBuffer(buffer_size=100000)

    num_episodes = 1000
    max_steps_per_episode = 1000
    batch_size = 64
    max_speed = 50  # speed setpoint (km/h) tracked by the longitudinal PID

    env.set_spectator()
    for episode in range(num_episodes):
        state = env.reset()
        episode_reward = 0

        for _ in range(max_steps_per_episode):
            action = agent.select_action(state)
            next_state, reward, done, _info = env.step(action, max_speed)
            replay_buffer.add(state, action, next_state, reward, done)

            # Start learning only once a full minibatch is available.
            if len(replay_buffer) > batch_size:
                agent.train(replay_buffer, batch_size)

            episode_reward += reward
            state = next_state
            if done:
                break

        print(f"Episode: {episode}, Reward: {episode_reward}")