import torch
import torch.nn as nn
import numpy as np
import cv2
from .interfuser import *  # model definitions are assumed to live in interfuser.py
import json

def load_model(model_path):
    """Build the InterFuser baseline model and load weights from a checkpoint.

    Args:
        model_path: Path to a checkpoint file saved by torch.save. The file may
            either wrap the weights under a ``"state_dict"`` key or be a bare
            state_dict.

    Returns:
        The model with weights loaded. Loading is non-strict: mismatched keys
        are reported on stdout instead of raising.
    """
    model = interfuser_baseline()  # factory defined in interfuser.py
    # map_location="cpu" lets checkpoints saved on GPU load on CPU-only
    # machines; predict() moves the model to the right device afterwards.
    checkpoint = torch.load(model_path, map_location="cpu")
    # Tolerate checkpoints saved as a bare state_dict (no wrapper key).
    state_dict = checkpoint["state_dict"] if "state_dict" in checkpoint else checkpoint
    missing, unexpected = model.load_state_dict(state_dict, strict=False)
    if missing:
        print("Missing keys in state_dict:", missing)
    if unexpected:
        print("Unexpected keys in state_dict:", unexpected)
    return model

# Load image data from a file path
def load_image(image_path):
    """Read an image from disk and return a (1, 3, 224, 224) float tensor.

    The image is converted BGR->RGB, resized to 224x224, rearranged to
    channels-first, and scaled to [0, 1].
    """
    bgr = cv2.imread(image_path)
    if bgr is None:
        raise ValueError(f"Failed to load image from {image_path}")
    rgb = cv2.resize(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB), (224, 224))
    chw = np.transpose(rgb, (2, 0, 1))  # (H, W, C) -> (C, H, W)
    tensor = torch.tensor(chw, dtype=torch.float32) / 255.0  # normalize to [0, 1]
    return tensor.unsqueeze(0)  # prepend batch dimension

# Load lidar and measurement data from file paths
def load_lidar(lidar_path):
    """Load a point cloud from a ``.npy`` file and pack it as a pseudo-image.

    One scalar feature per point (column 2) is laid out on a near-square
    H x W grid, replicated to 3 channels, and batched, giving a float tensor
    of shape (1, 3, H, W).

    NOTE(review): assumes the file stores one point per row with at least
    3 columns — confirm against the data pipeline.
    """
    points = np.load(lidar_path)

    print(f"Lidar raw data: {points}")
    print(f"Lidar raw shape: {points.shape}")

    # Keep a single scalar feature per point (column 2, e.g. depth).
    feature = points[:, 2]

    # Choose a near-square layout; trailing points that don't fit are dropped.
    n_points = feature.shape[0]
    height = int(np.sqrt(n_points))
    width = n_points // height
    grid = feature[:height * width].reshape(height, width)

    print(f"Reshaped lidar image to: {grid.shape}")

    # Replicate the single channel three times, then add a batch dimension.
    grid = np.stack([grid] * 3, axis=0)   # (3, H, W)
    grid = np.expand_dims(grid, axis=0)   # (1, 3, H, W)

    print(f"Lidar processed shape: {grid.shape}")

    return torch.tensor(grid, dtype=torch.float32)

def flatten_dict(d):
    """Recursively collect all numeric leaves of a (possibly nested) dict.

    Nested dicts are flattened recursively; lists/tuples are delegated to
    flatten_list; ints, floats, and bools become floats (True -> 1.0);
    every other value type is silently skipped.
    """
    flat = []
    for value in d.values():
        if isinstance(value, dict):
            flat += flatten_dict(value)
        elif isinstance(value, (list, tuple)):
            flat += flatten_list(value)
        elif isinstance(value, (int, float, bool)):
            flat.append(float(value))
        # anything else (str, None, ...) is intentionally ignored
    return flat

def flatten_list(lst):
    """Recursively collect all numeric leaves of a (possibly nested) sequence.

    Nested lists/tuples are expanded in order; ints, floats, and bools become
    floats (True -> 1.0, False -> 0.0); other item types are silently skipped.
    """
    flat = []
    for element in lst:
        if isinstance(element, (list, tuple)):
            flat.extend(flatten_list(element))
        elif isinstance(element, (int, float, bool)):
            flat.append(float(element))
        # non-numeric leaves (str, None, ...) are intentionally dropped
    return flat

def load_measurements(measurements_path):
    """Load a JSON measurements file and return its numeric values as a tensor.

    Nested dicts/lists are flattened to a flat list of floats, then wrapped
    in a float32 tensor with a leading batch dimension.
    """
    with open(measurements_path, 'r') as f:
        raw = json.load(f)

    # Flatten nested structures down to a flat list of numbers.
    if isinstance(raw, dict):
        raw = flatten_dict(raw)
    elif isinstance(raw, (list, tuple)):
        raw = flatten_list(raw)

    return torch.tensor(raw, dtype=torch.float32).unsqueeze(0)


# Prepare input data
def prepare_input_data(front_image_path, left_image_path, right_image_path, front_center_image_path, lidar_path, measurements_path):
    """Load every model input from disk and return them as batched tensors.

    Returns a 7-tuple: four camera image tensors, the lidar pseudo-image,
    the flattened measurement vector, and a fixed target point.
    """
    camera_paths = (front_image_path, left_image_path,
                    right_image_path, front_center_image_path)
    front_image, left_image, right_image, front_center_image = (
        load_image(p) for p in camera_paths
    )
    lidar = load_lidar(lidar_path)
    measurements = load_measurements(measurements_path)

    # Fixed example goal point; replace with real route data for actual runs.
    target_point = torch.tensor([[5.0, 2.0]])

    return front_image, left_image, right_image, front_center_image, lidar, measurements, target_point

class GRUWaypointsPredictorWithCommand(nn.Module):
    """GRU-based waypoint head gated by a per-command mask.

    NOTE(review): this class defines no ``__init__``, yet ``forward`` reads
    ``self.waypoints``, ``self.encoder``, ``self.grus`` and ``self.decoders``.
    Presumably those attributes are attached elsewhere (e.g. inherited from the
    original interfuser module this overrides) — confirm before use.
    """

    def forward(self, x, target_point, measurements):
        # x: (batch, seq_len, dim) token sequence; target_point: 2-D goal;
        # measurements: first 6 entries are used as a command mask — TODO confirm.
        bs, n, dim = x.shape
        print(f"Input to GRU shape: {x.shape}")

        # If the sequence is too long, trim it to the number of waypoints.
        if n > self.waypoints:
            x = x[:, :self.waypoints, :]  # trim sequence length
        elif n < self.waypoints:
            raise ValueError(f"Sequence length {n} is less than waypoints {self.waypoints}. Adjust the input.")

        # Broadcast the 6 command values over (waypoints, 2) coordinates.
        mask = measurements[:, :6, None, None]
        mask = mask.repeat(1, 1, self.waypoints, 2)

        # Encode the goal point as the initial GRU hidden state.
        z = self.encoder(target_point).unsqueeze(0)
        outputs = []
        for i in range(6):
            output, _ = self.grus[i](x, z)
            output = output.reshape(bs * self.waypoints, -1)
            output = self.decoders[i](output).reshape(bs, self.waypoints, 2)
            # Cumulative sum turns per-step deltas into absolute waypoints.
            output = torch.cumsum(output, 1)
            outputs.append(output)
        outputs = torch.stack(outputs, 1)
        # Weight each command branch by its mask entry and sum the branches.
        output = torch.sum(outputs * mask, dim=1)
        return output


# Run prediction
def predict(model, front_image, left_image, right_image, front_center_image, lidar, measurements, target_point):
    """Run one inference pass and return predicted waypoints.

    Moves the model and all input tensors to CUDA when available (CPU
    otherwise), extracts backbone features, trims/permutes them for the
    waypoint head, and returns its output. No gradients are tracked.
    """
    model.eval()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    # Move every input tensor onto the same device as the model.
    inputs = [front_image, left_image, right_image, front_center_image,
              lidar, measurements, target_point]
    (front_image, left_image, right_image, front_center_image,
     lidar, measurements, target_point) = [t.to(device) for t in inputs]

    with torch.no_grad():
        features = model.forward_features(front_image, left_image, right_image, front_center_image, lidar, measurements)
        print(f"Features shape: {features.shape}")

        # Trim the token sequence to the number of waypoints the head expects.
        max_seq_len = 10
        features = features[:max_seq_len, :, :]
        print(f"Trimmed features shape: {features.shape}")

        # (seq_len, batch, dim) -> (batch, seq_len, dim)
        features = features.permute(1, 0, 2)
        print(f"Permuted features shape: {features.shape}")

        waypoints = model.waypoints_generator(features, target_point)

    return waypoints


def _main():
    """Script entry point: load model + inputs from fixed paths and predict."""
    model_path = "/home/wsq/carla/InterFuser/interfuser/timm/models/interfuser.pth.tar"
    front_image_path = "/home/wsq/carla/InterFuser/leaderboard/team_code/routes_town01_long_w16_08_13_08_35_38/front.jpg"
    left_image_path = "/home/wsq/carla/InterFuser/leaderboard/team_code/routes_town01_long_w16_08_13_08_35_38/left.jpg"
    right_image_path = "/home/wsq/carla/InterFuser/leaderboard/team_code/routes_town01_long_w16_08_13_08_35_38/right.jpg"
    front_center_image_path = "/home/wsq/carla/InterFuser/leaderboard/team_code/routes_town01_long_w16_08_13_08_35_38/front.jpg"
    lidar_path = "/home/wsq/carla/InterFuser/leaderboard/team_code/routes_town01_long_w16_08_13_08_35_38/lidar/0024.npy"
    measurements_path = "/home/wsq/carla/InterFuser/leaderboard/team_code/routes_town01_long_w16_08_13_08_35_38/measurements/0024.json"

    model = load_model(model_path)
    inputs = prepare_input_data(
        front_image_path, left_image_path, right_image_path, front_center_image_path, lidar_path, measurements_path
    )

    predicted_waypoints = predict(model, *inputs)

    print("Predicted Waypoints:", predicted_waypoints)


if __name__ == "__main__":
    _main()

