import json
import os
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union

import numpy as np
import torch
from PIL import Image, ImageDraw
from tqdm import tqdm

from data_process.structure import Point3D, RtkData
from utils.coordinate_transformer import CoordinateTransformer


class BEVAnnotationProcessor:
    """Turns polygon annotations drawn on a BEV (bird's-eye-view) image into
    per-frame binary label masks as torch tensors.

    Pipeline: pixel polygons -> physical (metric) coordinates relative to a
    reference RTK pose -> per-frame ego-centric coordinates -> rasterized
    BEV masks.
    """

    @staticmethod
    def load_annotation(
        annotation_path: Union[str, Path],
        meta_path: Union[str, Path],
    ) -> Tuple[Dict[str, Dict[str, List[Point3D]]], RtkData, Dict[str, Any]]:
        """Load a polygon annotation file plus its metadata file.

        Args:
            annotation_path: LabelMe-style JSON with ``imageWidth``,
                ``imageHeight`` and a non-empty ``shapes`` list.
            meta_path: JSON describing the BEV image's physical extent
                (``left_bottom``/``right_top``/``width_px``/``height_px``)
                and the reference pose (``lat0_lon0``, ``h0``, ``heading``).

        Returns:
            Tuple of (label_dict, ref_rtk, meta) where ``label_dict`` maps
            class label -> {polygon index (str) -> list of physical Point3D}.

        Raises:
            ValueError: if the annotation and meta image sizes disagree by
                more than one pixel, or the annotation contains no shapes.
        """
        with open(annotation_path, 'r', encoding='utf-8') as f:
            annotation = json.load(f)

        with open(meta_path, 'r', encoding='utf-8') as f:
            meta = json.load(f)

        # Tolerate 1 px of rounding slack in *either* direction. (The previous
        # check was one-sided and missed annotations smaller than the meta.)
        if (abs(annotation['imageWidth'] - meta['width_px']) > 1 or
                abs(annotation['imageHeight'] - meta['height_px']) > 1):
            raise ValueError(
                f"Image dimension mismatch! Annotation: {annotation['imageWidth']}x{annotation['imageHeight']}, "
                f"Meta: {meta['width_px']}x{meta['height_px']}"
            )

        if not annotation['shapes']:
            raise ValueError("No shapes found in annotation file")

        # Group polygons by class label; each polygon gets a sequential
        # string key ("0", "1", ...) within its label.
        label_dict: Dict[str, Dict[str, List[Point3D]]] = {}
        for shape in annotation['shapes']:
            polygons = label_dict.setdefault(shape['label'], {})
            polygons[str(len(polygons))] = BEVAnnotationProcessor._pixel_to_physical(
                shape['points'], meta
            )

        # Reference pose of the BEV map; roll/pitch zeroed (map is assumed
        # level), timestamp is a placeholder.
        ref_rtk = RtkData(
            latitude=meta['lat0_lon0'][0],
            longitude=meta['lat0_lon0'][1],
            height=meta['h0'],
            heading=meta['heading'],
            roll=0.0,
            pitch=0.0,
            timeStamp=0,
        )

        return label_dict, ref_rtk, meta

    @staticmethod
    def _pixel_to_physical(
        points: List[List[float]],
        meta: Dict[str, Union[float, List[float], int]],
    ) -> List[Point3D]:
        """Map image-pixel coordinates to physical (metric) coordinates.

        Pixel origin is top-left; physical origin is the map's bottom-left
        (``left_bottom``), hence the vertical flip. Z is fixed to 0 (ground
        plane).
        """
        left_bottom = meta["left_bottom"]
        right_top = meta["right_top"]
        width_px = meta["width_px"]
        height_px = meta["height_px"]

        span_x = right_top[0] - left_bottom[0]
        span_y = right_top[1] - left_bottom[1]

        physical_points = []
        for px, py in points:
            # Normalize to [0, 1]; flip Y because pixel rows grow downward.
            nx = px / width_px
            ny = 1.0 - py / height_px
            physical_points.append(
                Point3D(left_bottom[0] + nx * span_x,
                        left_bottom[1] + ny * span_y,
                        0.0)
            )

        return physical_points

    @staticmethod
    def generate_tensor_labels(
        annotation: Dict[str, Dict[str, List[Point3D]]],
        rtk_data: Dict[str, RtkData],
        ref_rtk: RtkData,
        cfg: Dict[str, Any],
    ) -> Dict[str, Dict[str, torch.Tensor]]:
        """Rasterize annotation polygons into per-frame BEV tensor masks.

        Args:
            annotation: output of :meth:`load_annotation` — class label ->
                {polygon key -> physical points}.
            rtk_data: frame id -> vehicle RTK pose for that frame.
            ref_rtk: reference pose the annotation polygons are expressed in.
            cfg: expects ``x_range``/``y_range`` (metres), optional
                ``x_res``/``y_res`` (default 0.1 m/px),
                ``label_image_output_folder``, ``data_name`` and ``debug``.

        Returns:
            frame id -> {"0": background mask, "1": class-1 mask} as float32
            tensors in [0, 1]. Class "0" has classes "2"/"3" subtracted
            (presumably obstacles carved out of background — confirm with
            the label schema).
        """
        output_dir = cfg.get('label_image_output_folder')
        os.makedirs(output_dir, exist_ok=True)

        x_res = cfg.get('x_res', 0.1)
        y_res = cfg.get('y_res', 0.1)

        # Snap configured ranges outward onto the resolution grid so the
        # pixel counts below come out integral.
        bev_x_range = cfg.get('x_range')
        bev_x_range = (
            np.floor(bev_x_range[0] / x_res) * x_res,
            np.ceil(bev_x_range[1] / x_res) * x_res,
        )
        bev_y_range = cfg.get('y_range')
        bev_y_range = (
            np.floor(bev_y_range[0] / y_res) * y_res,
            np.ceil(bev_y_range[1] / y_res) * y_res,
        )

        bev_width = int(round((bev_x_range[1] - bev_x_range[0]) / x_res))
        bev_height = int(round((bev_y_range[1] - bev_y_range[0]) / y_res))
        x_span = bev_x_range[1] - bev_x_range[0]
        y_span = bev_y_range[1] - bev_y_range[0]

        results: Dict[str, Dict[str, torch.Tensor]] = {}

        for frame_id, curr_rtk in tqdm(rtk_data.items(), desc="Processing vehicle positions"):
            # Pose used for the transform: position + heading only, with
            # pitch/roll zeroed (level-ground assumption). Invariant per
            # frame, so build it once instead of per polygon.
            frame_rtk = {frame_id: RtkData(
                timeStamp=0,
                longitude=curr_rtk.longitude,
                latitude=curr_rtk.latitude,
                height=curr_rtk.height,
                pitch=0.0,
                roll=0.0,
                heading=curr_rtk.heading,
            )}

            class_masks: Dict[str, np.ndarray] = {}

            for class_name in ("0", "1", "2", "3"):
                if class_name not in annotation:
                    continue
                img = Image.new('L', (bev_width, bev_height), 0)
                draw = ImageDraw.Draw(img)

                for points in annotation[class_name].values():
                    # Express the polygon in this frame's ego coordinates.
                    ego_points = CoordinateTransformer.transform_points(
                        {frame_id: points}, frame_rtk, ref_rtk, True
                    )[frame_id]

                    image_points = []
                    for point in ego_points:
                        # X maps left->right; Y is flipped (image rows grow
                        # downward while physical Y grows upward).
                        pixel_x = int(np.round(
                            (point.x - bev_x_range[0]) / x_span * (bev_width - 1)
                        ))
                        pixel_y = int(np.round(
                            (bev_y_range[1] - point.y) / y_span * (bev_height - 1)
                        ))
                        image_points.append((pixel_x, pixel_y))

                    # PIL needs at least 3 vertices to fill a polygon.
                    if len(image_points) >= 3:
                        draw.polygon(image_points, fill=255)

                class_masks[class_name] = np.array(img).astype(np.float32) / 255.0

            # NOTE: the previous version also built tensors inside the loop
            # above and then discarded them by resetting the dict; only the
            # masks below were ever returned.
            id_results: Dict[str, torch.Tensor] = {}

            if "0" in class_masks:
                background = class_masks["0"]
                if "2" in class_masks:
                    background = np.clip(background - class_masks["2"], 0, 1)
                if "3" in class_masks:
                    background = np.clip(background - class_masks["3"], 0, 1)
                id_results["0"] = torch.from_numpy(background)

            if "1" in class_masks:
                id_results["1"] = torch.from_numpy(class_masks["1"])

            if cfg.get('debug', False):
                # Dump the final masks as grayscale PNGs for visual checks.
                for class_name in ("0", "1"):
                    if class_name in id_results:
                        img = Image.fromarray((id_results[class_name].numpy() * 255).astype(np.uint8))
                        img.save(os.path.join(output_dir, f"{cfg.get('data_name')}_{frame_id}_{class_name}.png"))

            results[frame_id] = id_results

        return results