# tesla_route_search_sim.py
# A simplified simulation of Tesla's Neural Network + Monte Carlo Route Search

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Tuple

# -----------------------------
# 1. Environment Simulation
# -----------------------------

class DrivingEnvironment:
    """Minimal single-lane driving world used by the planner demo.

    One ego vehicle (kinematic bicycle model, integrated with explicit Euler
    at ``dt``) drives along a straight lane; one slower vehicle ahead acts as
    a moving obstacle. Coordinates are metres, yaw in radians.
    """

    def __init__(self):
        # Ego state
        self.ego_x = 0.0
        self.ego_y = 0.0
        self.ego_yaw = 0.0  # radians
        self.speed = 10.0   # m/s
        self.dt = 0.1       # integration time step (s)

        # Lane width = 3.7m, center at y=0
        self.lane_center_y = 0.0
        self.lane_width = 3.7

        # Obstacle (e.g., slow vehicle). Remember the initial pose so that
        # reset() can restore it — previously the obstacle was never reset,
        # so reusing one env across episodes let it drift away for good.
        self._obstacle_start = (50.0, 3.0)
        self.obstacle_x, self.obstacle_y = self._obstacle_start
        self.obstacle_speed = 8.0

    def reset(self):
        """Restore the full episode state (ego AND obstacle).

        Returns the initial observation vector.

        Bug fix: the obstacle pose was previously not restored, so every
        episode after the first started with the obstacle wherever the last
        episode had moved it.
        """
        self.ego_x = 0.0
        self.ego_y = 0.0
        self.ego_yaw = 0.0
        self.speed = 10.0
        self.obstacle_x, self.obstacle_y = self._obstacle_start
        return self.get_observation()

    def get_observation(self) -> np.ndarray:
        """Return the 7-dim observation:
        [ego_x, ego_y, ego_yaw, speed, rel_obstacle_x, rel_obstacle_y,
        obstacle_speed]."""
        obs = np.array([
            self.ego_x,
            self.ego_y,
            self.ego_yaw,
            self.speed,
            self.obstacle_x - self.ego_x,
            self.obstacle_y - self.ego_y,
            self.obstacle_speed
        ])
        return obs

    def _distance_to_obstacle(self) -> float:
        """Euclidean ego-obstacle distance (shared by reward and termination)."""
        return float(np.hypot(self.ego_x - self.obstacle_x,
                              self.ego_y - self.obstacle_y))

    def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool]:
        """Advance the world one time step.

        action: [steering_angle (rad), acceleration (m/s^2)] — clipped to
        [-pi/6, pi/6] and [-3, 3] respectively.
        Returns (next_observation, reward, done).
        """
        steering = action[0]
        acc = action[1]

        # Simple kinematic bicycle model (approximate)
        L = 2.9  # wheelbase (m)
        delta = np.clip(steering, -np.pi/6, np.pi/6)
        a = np.clip(acc, -3.0, 3.0)

        self.speed += a * self.dt
        self.speed = np.clip(self.speed, 0.0, 30.0)  # max 108 km/h

        self.ego_x += self.speed * np.cos(self.ego_yaw) * self.dt
        self.ego_y += self.speed * np.sin(self.ego_yaw) * self.dt
        self.ego_yaw += (self.speed / L) * np.tan(delta) * self.dt

        # Obstacle moves straight ahead at constant speed
        self.obstacle_x += self.obstacle_speed * self.dt

        next_obs = self.get_observation()
        reward = self.compute_reward()
        done = self.is_done()

        return next_obs, reward, done

    def compute_reward(self) -> float:
        """Shaped scalar reward: progress + lane keeping - proximity - jerk."""
        reward = 0.0

        # Progress along the lane direction
        reward += 0.1 * self.speed * np.cos(self.ego_yaw)

        # Stay in lane
        lateral_error = abs(self.ego_y - self.lane_center_y)
        if lateral_error > self.lane_width / 2:
            reward -= 1.0  # outside lane
        else:
            reward += 0.1

        # Avoid collision (crash threshold must match is_done)
        dist = self._distance_to_obstacle()
        if dist < 5.0:
            reward -= 10.0  # crash!
        elif dist < 10.0:
            reward -= 1.0  # too close

        # Smoothness — penalize large heading deviation ("jerk" proxy)
        jerk_penalty = abs(np.tan(self.ego_yaw)) * 0.1
        reward -= jerk_penalty

        return reward

    def is_done(self) -> bool:
        """Episode ends on a crash (< 5 m to obstacle) or after 200 m of travel."""
        return self._distance_to_obstacle() < 5.0 or self.ego_x > 200.0


# -----------------------------
# 2. Policy Network (Trajectory Proposer)
# -----------------------------

class PolicyNet(nn.Module):
    """Trajectory-proposal network.

    Maps an observation vector to ``num_trajectories`` candidate paths,
    each a sequence of ``horizon`` (x, y) waypoints.
    """

    def __init__(self, input_dim=7, hidden_dim=64, num_trajectories=5, horizon=10):
        super().__init__()
        self.horizon = horizon
        self.num_trajectories = num_trajectories

        # Two hidden layers, then a single flat head that covers every
        # waypoint of every candidate: num_trajectories * horizon * (x, y).
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, num_trajectories * horizon * 2),
        )

    def forward(self, x):
        """(B, input_dim) -> (B, num_trajectories, horizon, 2) waypoints."""
        flat = self.net(x)
        return flat.view(x.shape[0], self.num_trajectories, self.horizon, 2)


# -----------------------------
# 3. Value Network (Scorer)
# -----------------------------

class ValueNet(nn.Module):
    """Scores a candidate trajectory with a single scalar value.

    Generalization: the encoder input width was hard-coded to 10 steps even
    though ``forward`` flattens whatever horizon it receives; ``horizon`` is
    now a constructor parameter (default 10, fully backward compatible).
    """

    def __init__(self, traj_features=2, hidden_dim=64, horizon=10):
        super().__init__()
        # Flatten horizon * features waypoint coordinates into one vector.
        self.encoder = nn.Sequential(
            nn.Linear(traj_features * horizon, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.value_head = nn.Linear(hidden_dim, 1)

    def forward(self, traj):
        """(B, H, traj_features) -> (B,) scalar scores.

        H * traj_features must match the encoder's input width.
        """
        B = traj.shape[0]
        h = self.encoder(traj.view(B, -1))
        return self.value_head(h).squeeze(-1)


# -----------------------------
# 4. Monte Carlo Trajectory Sampler & Evaluator
# -----------------------------

def sample_perturbed_trajectories(base_traj: torch.Tensor,
                                  num_samples=64,
                                  noise_scale=0.5) -> torch.Tensor:
    """Perturb a base trajectory with Gaussian noise (Monte Carlo sampling).

    base_traj: (H, 2) tensor of waypoints.
    num_samples: number of perturbed copies to draw.
    noise_scale: std-dev of the additive noise (previously hard-coded 0.5;
        now a backward-compatible parameter).
    returns: (num_samples, H, 2) tensor.

    Improvement: one batched ``torch.randn`` call replaces the Python loop
    of per-sample ``randn_like`` + ``torch.stack``.
    """
    noise = torch.randn(num_samples, *base_traj.shape) * noise_scale
    # Broadcasting (1, H, 2) + (N, H, 2) -> (N, H, 2)
    return base_traj.unsqueeze(0) + noise


def select_best_trajectory(mc_samples: torch.Tensor, value_net: ValueNet) -> torch.Tensor:
    """Score every Monte Carlo sample with the value net and return the
    highest-scoring trajectory.

    mc_samples: (N, H, 2); returns the winning (H, 2) trajectory.
    """
    with torch.no_grad():
        scores = value_net(mc_samples)          # one scalar per sample, (N,)
        winner = torch.argmax(scores)
        return mc_samples[winner]


# -----------------------------
# 5. Expert Data Generator (for Imitation Learning)
# -----------------------------

def generate_expert_action(env: DrivingEnvironment) -> np.ndarray:
    """Rule-based "expert" used to produce imitation-learning targets.

    Returns [steering, acceleration]. Steering nudges away from the obstacle
    when it is far ahead (> 10 m longitudinal gap) and swerves when close;
    acceleration holds speed toward 25 m/s.
    """
    gap_x = env.obstacle_x - env.ego_x
    gap_y = env.obstacle_y - env.ego_y

    if gap_x > 10:
        # Obstacle still far ahead: hold lane if roughly aligned, otherwise
        # steer away from its side.
        if abs(gap_y) < 1:
            steering = 0.0
        elif gap_y < 0:
            steering = 0.1
        else:
            steering = -0.1
    else:
        # Obstacle close: swerve to whichever side it is not on.
        steering = -0.1 if gap_y > 0 else 0.1

    throttle = 1.0 if env.speed < 25.0 else 0.0
    return np.array([steering, throttle])


# -----------------------------
# 6. Training Loop (Imitation Learning)
# -----------------------------

def train_policy_and_value(policy_net, value_net, epochs=1000):
    """Jointly train the policy (imitation of a rule-based expert) and the
    value net (good-vs-bad trajectory discrimination).

    policy_net: proposes (1, K, H, 2) candidate trajectories from a state.
    value_net: scores a batch of (H, 2) trajectories.

    Fixes vs. previous version:
    - removed a per-epoch Monte Carlo sampling call whose result was never
      used (pure dead work);
    - dropped ``retain_graph=True``: base_traj is produced under no_grad, so
      the policy loss and the value loss live on disjoint autograd graphs.
    """
    optimizer_p = optim.Adam(policy_net.parameters(), lr=3e-4)
    optimizer_v = optim.Adam(value_net.parameters(), lr=3e-4)
    criterion_mse = nn.MSELoss()
    env = DrivingEnvironment()

    for epoch in range(epochs):
        optimizer_p.zero_grad()
        optimizer_v.zero_grad()

        obs = env.reset()
        state_tensor = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)

        # Policy proposes K candidate trajectories; detached, since it is
        # only used to build training data for the value net below.
        with torch.no_grad():
            candidates = policy_net(state_tensor)  # (1, K, H, 2)
            base_traj = candidates[0, 0]  # pick first proposal

        # Generate expert label: step the env with the expert action and use
        # the resulting pose (plus a look-ahead offset) as the target point.
        expert_action = generate_expert_action(env)
        env.step(expert_action)
        next_obs = env.get_observation()
        target_position = torch.tensor([[next_obs[0] + 5, next_obs[1]]], dtype=torch.float32)

        # Forward pass through policy (with gradients this time)
        pred_trajs = policy_net(state_tensor)
        pred_first_point = pred_trajs[0, 0, 0]  # predicted first waypoint

        # Supervised loss: match expert-like behavior
        loss_policy = criterion_mse(pred_first_point, target_position[0])

        # Value network: label the base proposal "good" (+1) and a shifted,
        # noise-corrupted copy "bad" (-1).
        good_traj = base_traj.unsqueeze(0)  # assume base is good
        bad_traj = torch.roll(good_traj, shifts=5, dims=1) + torch.randn_like(good_traj) * 2.0
        all_trajs = torch.cat([good_traj, bad_traj], dim=0)
        labels = torch.tensor([1.0, -1.0], dtype=torch.float32)  # good/bad
        values = value_net(all_trajs)
        loss_value = criterion_mse(values, labels)

        # Backward: disjoint graphs, so plain back-to-back backward() is safe.
        loss_policy.backward()
        loss_value.backward()

        optimizer_p.step()
        optimizer_v.step()

        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Policy Loss: {loss_policy.item():.4f}, Value Loss: {loss_value.item():.4f}")


# -----------------------------
# 7. Test & Visualize
# -----------------------------

def test_and_visualize(policy_net, value_net):
    """Roll out the trained planner in a fresh environment for up to 200
    steps and plot the resulting ego path against the obstacle."""
    env = DrivingEnvironment()
    obs = env.reset()
    path_points = []
    taken_actions = []

    with torch.no_grad():
        for _ in range(200):
            state = torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
            proposals = policy_net(state)
            nominal = proposals[0, 0]

            # Monte Carlo: perturb the nominal plan, keep the best-scoring one.
            rollouts = sample_perturbed_trajectories(nominal, num_samples=16)
            chosen = select_best_trajectory(rollouts, value_net)

            # Turn the first segment of the chosen plan into a control action
            # (proportional steering toward the implied heading).
            seg_dx = chosen[1, 0] - chosen[0, 0]
            seg_dy = chosen[1, 1] - chosen[0, 1]
            desired_yaw = np.arctan2(seg_dy, seg_dx)
            steering = (desired_yaw - env.ego_yaw) * 2.0  # PD-like
            acc = 1.0 if env.speed < 25 else 0.0

            action = np.array([steering, acc])
            obs, reward, done = env.step(action)

            path_points.append([env.ego_x, env.ego_y])
            taken_actions.append(action)

            if done:
                break

    path_points = np.array(path_points)
    plt.figure(figsize=(10, 6))
    plt.plot(path_points[:, 0], path_points[:, 1], 'b-', label='Ego Vehicle Path')
    plt.scatter([env.obstacle_x], [env.obstacle_y], c='red', s=100, label='Obstacle Final')
    plt.axvline(x=50, color='gray', linestyle='--', alpha=0.5)
    plt.xlabel("X (m)")
    plt.ylabel("Y (m)")
    plt.title("Tesla-Inspired NN + Monte Carlo Planning Simulation")
    plt.legend()
    plt.grid(True)
    plt.show()


# -----------------------------
# 8. Run Everything
# -----------------------------

def _main():
    """Seed the RNGs, build both networks, train them, then visualize."""
    torch.manual_seed(42)
    np.random.seed(42)

    policy_net = PolicyNet(input_dim=7, num_trajectories=5, horizon=10)
    value_net = ValueNet()

    print("🔍 Training Policy and Value Networks...")
    train_policy_and_value(policy_net, value_net, epochs=1000)

    print("\n📊 Testing and Visualizing...")
    test_and_visualize(policy_net, value_net)


if __name__ == "__main__":
    _main()
