import cv2
import xml.etree.ElementTree as ET
import os
import re
from collections import defaultdict
import numpy as np
import torch
from sgan.models import TrajectoryGenerator
from attrdict import AttrDict
from sgan.utils import relative_to_abs, get_dset_path

def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)).

    Accepts scalars or numpy arrays.  Written via log-add-exp so that large
    negative inputs do not overflow in ``np.exp`` (the naive form raises a
    RuntimeWarning and round-trips through inf for x << 0).

    :param x: scalar or array-like input
    :return: sigmoid of ``x`` with the same shape as the input
    """
    # log(1 + exp(-x)) computed stably, then exponentiated back.
    return np.exp(-np.logaddexp(0.0, -x))

def collision_probability(traj1, traj2, threshold):
    """Estimate the collision probability between two trajectories.

    The probability is ``sigmoid(threshold - d_min)`` where ``d_min`` is the
    minimum Euclidean distance between any point of ``traj1`` and any point
    of ``traj2`` — the smaller the minimum distance, the higher the
    probability.

    :param traj1: sequence of (x, y) points of the first trajectory
    :param traj2: sequence of (x, y) points of the second trajectory
    :param threshold: distance threshold at which the probability is 0.5
    :return: collision probability in [0, 1); 0.0 if either trajectory is empty
    """
    a = np.asarray(traj1, dtype=float)
    b = np.asarray(traj2, dtype=float)
    # Empty trajectory -> minimum distance is effectively +inf -> probability 0
    # (the original loop reached the same value via an overflow warning).
    if a.size == 0 or b.size == 0:
        return 0.0

    # Vectorized pairwise distances replace the original O(n*m) Python
    # double loop; broadcasting gives an (n, m) distance matrix.
    min_distance = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1).min()

    # sigmoid(threshold - min_distance), written in log-add-exp form so a
    # very large distance cannot overflow np.exp.
    return float(np.exp(-np.logaddexp(0.0, min_distance - threshold)))


def get_generator(checkpoint):
    """Rebuild a TrajectoryGenerator from a saved checkpoint.

    Reads the training-time hyper-parameters stored under ``checkpoint['args']``,
    constructs the generator, restores its weights from ``checkpoint['g_state']``
    and switches it to evaluation mode (CPU inference).

    :param checkpoint: dict produced by the SGAN training script
    :return: a ``TrajectoryGenerator`` ready for inference
    """
    args = AttrDict(checkpoint['args'])
    # Mirror every architecture hyper-parameter recorded in the checkpoint.
    model_kwargs = {
        'obs_len': args.obs_len,
        'pred_len': args.pred_len,
        'embedding_dim': args.embedding_dim,
        'encoder_h_dim': args.encoder_h_dim_g,
        'decoder_h_dim': args.decoder_h_dim_g,
        'mlp_dim': args.mlp_dim,
        'num_layers': args.num_layers,
        'noise_dim': args.noise_dim,
        'noise_type': args.noise_type,
        'noise_mix_type': args.noise_mix_type,
        'pooling_type': args.pooling_type,
        'pool_every_timestep': args.pool_every_timestep,
        'dropout': args.dropout,
        'bottleneck_dim': args.bottleneck_dim,
        'neighborhood_size': args.neighborhood_size,
        'grid_size': args.grid_size,
        'batch_norm': args.batch_norm,
    }
    generator = TrajectoryGenerator(**model_kwargs)
    generator.load_state_dict(checkpoint['g_state'])
    # Inference only: disable dropout/batch-norm training behavior.
    generator.eval()
    return generator


def preprocess_one(history_points, scale=80):
    """Predict the future trajectory of a single pedestrian.

    Uses the module-level ``generator`` (loaded in ``__main__``).

    :param history_points: array of shape (obs_len, 2) with normalized (x, y)
        coordinates (pixel coordinates divided by the frame size).
    :param scale: factor mapping normalized coordinates into the space the
        model was trained on (generalized from the previous hard-coded 80).
    :return: predicted trajectory as a (pred_len, 2) numpy array, scaled back
        into normalized coordinates.
    """
    history_traj = history_points * scale
    # The generator expects a [obs_len, num_peds, 2] tensor; insert the
    # pedestrian axis for our single pedestrian.
    history_traj = np.expand_dims(history_traj, 1)
    history_traj = torch.tensor(history_traj, dtype=torch.float)

    # Relative displacements between consecutive observations; the first
    # step is zero by convention.
    history_traj_rel_np = np.diff(history_traj.numpy(), axis=0)
    history_traj_rel_np = np.concatenate((np.zeros((1, 1, 2)), history_traj_rel_np), axis=0)
    history_traj_rel = torch.tensor(history_traj_rel_np, dtype=torch.float)

    # A single pedestrian is one sequence covering index range [0, 1).
    seq_start_end = torch.tensor([[0, 1]])

    # Predict the future trajectory; relative outputs are accumulated from
    # the last observed absolute position.
    with torch.no_grad():
        pred_traj_fake_rel = generator(history_traj, history_traj_rel, seq_start_end)
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, history_traj[-1])

    return pred_traj_fake.numpy().reshape(-1, 2) / scale


def preprocess_multiple(history_points_list, scale=80):
    """Predict future trajectories for several pedestrians in one generator call.

    Uses the module-level ``generator`` (loaded in ``__main__``).

    :param history_points_list: iterable of (obs_len, 2) arrays of normalized
        (x, y) coordinates, one per pedestrian.
    :param scale: factor mapping normalized coordinates into the space the
        model was trained on (generalized from the previous hard-coded 80).
    :return: predicted trajectories as a flat (num_peds * pred_len, 2) numpy
        array, scaled back into normalized coordinates.
    """
    all_history_trajs = []
    all_history_trajs_rel = []
    seq_start_end = []

    # Running count of pedestrians packed so far.
    total_peds = 0

    for history_points in history_points_list:
        # [obs_len, 2] -> [obs_len, 1, 2] to match the generator's layout.
        history_traj = np.expand_dims(history_points * scale, 1)
        history_traj = torch.tensor(history_traj, dtype=torch.float)

        # Relative displacements; first step is zero by convention.
        history_traj_rel_np = np.diff(history_traj.numpy(), axis=0)
        history_traj_rel_np = np.concatenate((np.zeros((1, 1, 2)), history_traj_rel_np), axis=0)

        all_history_trajs.append(history_traj)
        all_history_trajs_rel.append(torch.tensor(history_traj_rel_np, dtype=torch.float))

        num_peds = history_traj.shape[1]
        # NOTE(review): each pedestrian gets its own (start, end) sequence,
        # so the generator's social pooling never mixes pedestrians; if joint
        # interaction modelling is intended, all pedestrians should share one
        # sequence — confirm against the model's expected input.
        seq_start_end.append((total_peds, total_peds + num_peds))
        total_peds += num_peds

    all_history_trajs = torch.cat(all_history_trajs, dim=1)
    all_history_trajs_rel = torch.cat(all_history_trajs_rel, dim=1)
    seq_start_end = torch.tensor(seq_start_end)

    # Predict the future trajectories for the whole batch at once.
    with torch.no_grad():
        pred_traj_fake_rel = generator(all_history_trajs, all_history_trajs_rel, seq_start_end)
        pred_traj_fake = relative_to_abs(pred_traj_fake_rel, all_history_trajs[-1])

    return pred_traj_fake.numpy().reshape(-1, 2) / scale


def parse_annotation(xml_path):
    """Parse a Pascal-VOC style annotation file.

    :param xml_path: path to the XML annotation file
    :return: list of ``(name, (xmin, ymin, xmax, ymax), color_id)`` tuples,
        where ``color_id`` is the trailing ``_<digits>`` of the object name,
        or -1 when the name has no numeric suffix (used only to pick a
        drawing color).
    """
    root = ET.parse(xml_path).getroot()

    parsed = []
    for obj in root.findall("object"):
        name = obj.find("name").text
        box_node = obj.find("bndbox")
        box = tuple(
            int(box_node.find(tag).text)
            for tag in ("xmin", "ymin", "xmax", "ymax")
        )

        suffix = re.search(r"_(\d+)$", name)
        color_id = int(suffix.group(1)) if suffix else -1

        parsed.append((name, box, color_id))

    return parsed


def calculate_sector_boundary_points(start_point, end_point, angle_increment):
    """
    Return the two boundary points of a circular sector.

    The sector is centered at ``start_point``, has radius equal to the
    distance from ``start_point`` to ``end_point``, and spans plus/minus
    ``angle_increment`` degrees around the direction from ``start_point``
    toward ``end_point``.

    :param start_point: (x, y) of the sector apex
    :param end_point: (x, y) defining the radius and the central direction
    :param angle_increment: half-angle of the sector, in degrees
    :return: a tuple of two numpy arrays, one per boundary point
    """
    start = np.asarray(start_point, dtype=float)
    delta = np.asarray(end_point, dtype=float) - start

    # Radius and direction of the sector's central ray.
    radius = np.hypot(delta[0], delta[1])
    center_angle = np.arctan2(delta[1], delta[0])
    half_span = np.radians(angle_increment)

    # One boundary point on each side of the central ray.
    boundary_point1, boundary_point2 = (
        start + radius * np.array([np.cos(theta), np.sin(theta)])
        for theta in (center_angle - half_span, center_angle + half_span)
    )
    return boundary_point1, boundary_point2

def display_frames_with_annotations(folder_path, N):
    """Replay annotated frames, drawing boxes, history and predicted trajectories.

    For each (.png, .xml) pair in ``folder_path`` (matched by sorted order),
    this draws bounding boxes and IDs, accumulates one history point per
    object every ``N`` frames, predicts future trajectories for the two
    hard-coded IDs "etw_2"/"etw_3" once enough history exists, draws a
    sector around each prediction, and overlays the pairwise collision
    probability.  Blocks on a key press per frame; 'q' quits.

    Relies on module-level globals set in ``__main__``: ``TOTAL`` (history
    length), and (indirectly via preprocess_one) ``generator``.

    :param folder_path: directory containing paired .png frames and .xml
        annotations
    :param N: sample a history point every N frames
    """
    files = sorted(os.listdir(folder_path))
    images = [f for f in files if f.endswith('.png')]
    xmls = [f for f in files if f.endswith('.xml')]

    colors = [
        (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
        (0, 255, 255), (255, 0, 255), (128, 0, 0), (0, 128, 0),
        # ...add more colors here, up to 20
    ]

    trajectories = defaultdict(list)  # history points per object ID
    trajectories_boxes = dict()  # last stored bounding box per object ID
    frame_count = 0

    for img_file, xml_file in zip(images, xmls):
        img_path = os.path.join(folder_path, img_file)
        xml_path = os.path.join(folder_path, xml_file)

        img = cv2.imread(img_path)
        h, w , _ = img.shape

        annotations = parse_annotation(xml_path)
        for obj_id, (xmin, ymin, xmax, ymax), color_id in annotations:
            # Center of the bounding box.
            center = (int((xmin + xmax) / 2), int((ymin + ymax) / 2))

            # Store one history point every N frames.
            if frame_count % N == 0:
                trajectories[obj_id].append(center)
                trajectories_boxes[obj_id] = [xmin, ymin, xmax, ymax]
                if len(trajectories[obj_id]) > TOTAL:
                    trajectories[obj_id].pop(0)  # drop the oldest point

            color = colors[color_id % len(colors)]

            # Draw the history trajectory points.
            for point in trajectories[obj_id]:
                cv2.circle(img, point, 2, color, -1)  # history point

            # Draw the bounding box and the object ID.
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
            cv2.putText(img, str(obj_id), (xmin, ymin - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

        frame_count += 1
        # Debug helper (disabled): save id etw_3's history points at frame 160.
        # if frame_count == 160 and 'etw_3' in trajectories:
        #     np.save("id_2_trajectory.npy", np.array(trajectories['etw_3']) / (w, h))

        IDs = ["etw_2", "etw_3"]
        preds = {}
        for ID in IDs:
            if ID in trajectories:
                if len(trajectories[ID]) >= TOTAL:
                    # Normalize to [0, 1] before prediction, then map back to pixels.
                    norm_points = np.array(trajectories[ID]) / (w, h)
                    norm_points_pred = preprocess_one(norm_points)
                    points_pred = norm_points_pred * (w, h)
                    preds[ID] = points_pred
                    # Draw the predicted trajectory points.
                    points_pred_int = points_pred.astype(int)
                    for point in points_pred_int:
                        cv2.circle(img, point, 0, (12, 123, 255), 5)  # predicted point

                    # cv2.circle(img, points_pred_int[0], 0, (0, 255, 255), 10)
                    # cv2.circle(img, points_pred_int[-1], 0, (0, 255, 255), 10)

                    # NOTE(review): the sector apex is points_pred_int[1] here,
                    # but the quad below starts from points_pred_int[0] —
                    # confirm whether this mismatch is intentional.
                    p2 = calculate_sector_boundary_points(points_pred_int[1], points_pred_int[-1], 30)

                    # Outline the sector as a quad: first point, one boundary,
                    # last point, other boundary.
                    thickness = 2
                    ps = [points_pred_int[0], p2[0].astype(int), points_pred_int[-1], p2[1].astype(int)]
                    cv2.line(img, ps[0], ps[1], (230, 0, 0), thickness)
                    cv2.line(img, ps[1], ps[2], (230, 0, 0), thickness)
                    cv2.line(img, ps[2], ps[3], (230, 0, 0), thickness)
                    cv2.line(img, ps[3], ps[0], (230, 0, 0), thickness)

                    # for p in p2:
                    #     p = p.astype(int)
                    #     cv2.circle(img, p, 0, (0, 255, 255), 10)


        # Collision probability is only defined between exactly two objects.
        assert len(IDs) == 2, "只能计算两个对象的碰撞概率"
        if len(preds) == 2:
            ps1 = preds["etw_2"]
            ps2 = preds["etw_3"]
            # Threshold 10 px: distance at which the probability reaches 0.5.
            pcs = collision_probability(ps1, ps2, 10)
            box = trajectories_boxes['etw_2']
            print(box)
            # if pcs > 0.01:
            cv2.putText(img, f"{round(pcs, 3)}", (box[0], box[1] - 20), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3)
            print(pcs)

        cv2.imshow("Frame", img)
        # if cv2.waitKey(40) & 0xFF == ord('q'):  # 40ms per frame
        #     break
        if cv2.waitKey(0) & 0xFF == ord('q'):  # wait for a key per frame; 'q' quits
            break

    cv2.destroyAllWindows()


if __name__ == "__main__":
    folder_path = "datasets/1/"  # TODO: Replace with your folder path
    N = 3  # sampling interval (in frames) between stored history points
    TOTAL = 12  # history length required before a prediction is made (global, read by display_frames_with_annotations)
    # Load the pretrained SGAN checkpoint on CPU; `generator` becomes a
    # module-level global read by preprocess_one / preprocess_multiple.
    checkpoint = torch.load("models/sgan-models/zara1_12_model.pt", map_location=torch.device('cpu'))
    generator = get_generator(checkpoint)

    display_frames_with_annotations(folder_path, N)
