import os
import torch
import numpy as np

from tqdm import tqdm
from typing import Dict, List, Any
from collections import defaultdict
from matplotlib import pyplot as plt
from data_process.plot_bev import BEVGenerator
from data_process.structure import RadarPoint, RtkData
from utils.coordinate_transformer import CoordinateTransformer


class RadarToTensorConverter:
    """Stateless helpers that turn per-frame radar point clouds into
    normalized sparse-grid feature tensors.

    Pipeline (per sequence): transform every frame into the first frame's
    reference pose, bin the points into a sparse 3D grid, then compute a
    10-dimensional normalized statistics vector for each occupied cell.
    """

    @staticmethod
    def convert(
        radar_data: Dict[str, List[RadarPoint]],
        rtk_data: Dict[str, RtkData],
        sequences: List[List[str]],
        cfg: Dict[str, Any]
    ) -> Dict[str, List[Dict[tuple, torch.Tensor]]]:
        """Convert frame sequences into per-frame grid statistic tensors.

        Args:
            radar_data: frame_id -> radar points of that frame.
            rtk_data: frame_id -> RTK pose of that frame.
            sequences: lists of frame ids; each must contain exactly
                cfg['num_frame'] entries.
            cfg: grid ranges/resolutions plus output options.

        Returns:
            Mapping from each sequence's first frame id to a list (one entry
            per frame) of {(x_bin, y_bin, h_bin): stats tensor} dicts.

        Raises:
            ValueError: if a sequence length differs from cfg['num_frame'].
        """
        num_frame = cfg.get('num_frame')
        result = {}

        for seq in tqdm(sequences, desc="Processing sequences"):
            # Input validation must not use assert (stripped under -O).
            if len(seq) != num_frame:
                raise ValueError(
                    f"Expected sequence length {num_frame}, but got {len(seq)} for sequence: {seq}"
                )

            batch_data = {frame_id: radar_data[frame_id] for frame_id in seq}

            # The first frame's pose — with roll/pitch zeroed — defines the
            # common reference frame for the whole sequence.
            first_pose = rtk_data[seq[0]]
            ref_rtk = RtkData(
                latitude=first_pose.latitude,
                longitude=first_pose.longitude,
                height=first_pose.height,
                heading=first_pose.heading,
                roll=0.0,
                pitch=0.0,
                timeStamp=seq[0],
            )

            transformed = CoordinateTransformer.transform_points(
                batch_data,
                rtk_data,
                ref_rtk,
            )

            frame_tensors = []
            grid_list = []
            for frame_id in seq:
                grids = RadarToTensorConverter.convert_to_tensor(
                    transformed[frame_id],
                    cfg,
                )
                frame_tensors.append(RadarToTensorConverter._normalize_stats(grids, cfg))
                grid_list.append(grids)

            RadarToTensorConverter.visualize_grid_points(grid_list, ref_rtk, cfg)

            # NOTE(review): sequences sharing the same first frame id would
            # overwrite each other here — confirm ids are unique per sequence.
            result[seq[0]] = frame_tensors

        return result

    @staticmethod
    def convert_to_tensor(
        points: List[RadarPoint],
        cfg: Dict[str, Any],
        max_points_per_cell: int = 20,
    ) -> Dict[tuple, List[tuple]]:
        """Bin points into a sparse 3D grid.

        Points outside cfg's x/y/height ranges are dropped; each cell keeps
        at most ``max_points_per_cell`` entries (first arrivals win).

        Returns:
            {(x_bin, y_bin, h_bin): [(z, strength, cohen_factor), ...]}
        """
        x_lo, x_hi = cfg['x_range']
        y_lo, y_hi = cfg['y_range']
        h_lo, h_hi = cfg['height_range']

        grids = defaultdict(list)
        for point in points:
            if not (x_lo <= point.x <= x_hi and
                    y_lo <= point.y <= y_hi and
                    h_lo <= point.z <= h_hi):
                continue
            # NOTE(review): a point exactly on the upper range edge lands in
            # a bin one past the nominal grid — confirm downstream tolerates it.
            x_bin = int((point.x - x_lo) / cfg['x_res'])
            y_bin = int((point.y - y_lo) / cfg['y_res'])
            h_bin = int((point.z - h_lo) / cfg['height_res'])
            cell = grids[(x_bin, y_bin, h_bin)]
            if len(cell) < max_points_per_cell:
                cell.append((point.z, point.strength, point.cohen_factor))

        return grids

    @staticmethod
    def _normalize_stats(
        grids: Dict[tuple, List[tuple]],
        cfg: Dict[str, Any],
    ) -> Dict[tuple, torch.Tensor]:
        """Compute a normalized 10-feature tensor per occupied grid cell.

        Feature layout: [n, z_max, z_min, z_mean, s_max, s_min, s_mean,
        c_max, c_min, c_mean].

        - z features are scaled to [-1, 1] relative to the cell's vertical span.
        - s (strength) features are log1p-transformed, standardized by the
          global statistics over all cells, clipped to [-3, 3] and divided by 3.
        - c (cohen factor) features are log-transformed and affinely rescaled
          (assumes cohen_factor > 0 — TODO confirm with the data source).
        """
        normalized = {}

        # Global log1p-strength statistics over every stored point.
        all_log_s = []
        for values in grids.values():
            _, s_vals, _ = zip(*values)
            all_log_s.extend(np.log1p(s_vals))
        s_log_mean = np.mean(all_log_s) if all_log_s else 0.0
        s_log_std = np.std(all_log_s) if all_log_s else 1.0
        if s_log_std == 0:
            # All strengths identical: avoid a division by zero below.
            s_log_std = 1.0

        for (x, y, h), values in grids.items():
            z_vals, s_vals, c_vals = zip(*values)

            # Raw point count. Original note: should eventually be mapped via
            # log(n - 0.95) / 3 plus uniform(-0.5, 0.5) jitter for a continuous
            # representation; that mapping belongs in the augmentation stage.
            n = len(z_vals)

            # Heights normalized against the cell's own vertical extent.
            height_mid = cfg['height_res'] / 2 + h * cfg['height_res'] + cfg['height_range'][0]
            height_half_range = cfg['height_res'] / 2
            z_max = (np.max(z_vals) - height_mid) / height_half_range
            z_min = (np.min(z_vals) - height_mid) / height_half_range
            z_mean = (np.mean(z_vals) - height_mid) / height_half_range

            # BUGFIX: use log1p to match the global statistics above (np.log
            # here would standardize against mismatched mean/std).
            log_s_vals = np.log1p(s_vals)
            s_max = np.clip((np.max(log_s_vals) - s_log_mean) / s_log_std, -3, 3) / 3
            s_min = np.clip((np.min(log_s_vals) - s_log_mean) / s_log_std, -3, 3) / 3
            s_mean = np.clip((np.mean(log_s_vals) - s_log_mean) / s_log_std, -3, 3) / 3

            # log is monotonic, so max/min commute with it; the mean is taken
            # before the log, matching the original log-of-mean formulation.
            c_max = (np.log(np.max(c_vals)) + 1.1) / 0.9
            c_min = (np.log(np.min(c_vals)) + 1.1) / 0.9
            c_mean = (np.log(np.mean(c_vals)) + 1.1) / 0.9

            normalized[(x, y, h)] = torch.tensor(
                [n,
                 z_max, z_min, z_mean,
                 s_max, s_min, s_mean,
                 c_max, c_min, c_mean],
                dtype=torch.float32,
            )

        return normalized

    @staticmethod
    def visualize_features(
        tensor_dict: Dict[str, List[Dict[tuple, torch.Tensor]]],
        cfg: Dict[str, Any],
    ) -> None:
        """Save one histogram PNG per feature into cfg['distribution_output_folder']."""
        distribution_output_folder = cfg.get('distribution_output_folder')
        os.makedirs(distribution_output_folder, exist_ok=True)

        # Flatten the nested structure once: the previous version re-walked it
        # per feature and raised StopIteration on empty input.
        all_tensors = [
            tensor
            for frame_tensors in tensor_dict.values()
            for per_frame in frame_tensors
            for tensor in per_frame.values()
        ]
        if not all_tensors:
            return  # nothing to plot

        num_features = all_tensors[0].shape[0]
        for feature_idx in range(num_features):
            all_values = [tensor[feature_idx].item() for tensor in all_tensors]

            plt.figure(figsize=(10, 6))
            plt.hist(all_values, bins=50, color='blue', alpha=0.7)

            feature_name = f"Feature {feature_idx}"
            plt.title(f'Distribution of {feature_name}')
            plt.xlabel('Value')
            plt.ylabel('Frequency')
            plt.grid(True, alpha=0.3)

            output_path = os.path.join(distribution_output_folder, f'feature_{feature_idx}.png')
            plt.savefig(output_path, bbox_inches='tight', dpi=150)
            plt.close()

    @staticmethod
    def visualize_grid_points(
        grid_list: List[Dict[tuple, List[tuple]]],
        ref_rtk: RtkData,
        cfg: Dict[str, Any],
    ) -> None:
        """Render a BEV image of the binned points, jittered inside their cells.

        NOTE: mutates ``cfg`` in place (lat0/lon0/h0/heading) because
        BEVGenerator.create_bev reads the reference pose from cfg — callers
        should not rely on cfg staying unchanged.
        """
        cfg['lat0'] = ref_rtk.latitude
        cfg['lon0'] = ref_rtk.longitude
        cfg['h0'] = ref_rtk.height
        cfg['heading'] = ref_rtk.heading

        all_points = []
        for grids in grid_list:
            for (x_bin, y_bin, _), values in grids.items():
                # Cell bounds depend only on the bin indices: compute once per cell.
                x_min = cfg['x_range'][0] + x_bin * cfg['x_res']
                x_max = x_min + cfg['x_res']
                y_min = cfg['y_range'][0] + y_bin * cfg['y_res']
                y_max = y_min + cfg['y_res']

                for z, strength, cohen in values:
                    # Scatter each stored point 10 times at random positions
                    # inside its cell so the BEV reflects cell occupancy.
                    for _ in range(10):
                        x = np.random.uniform(x_min, x_max)
                        y = np.random.uniform(y_min, y_max)

                        all_points.append(RadarPoint(
                            x=x, y=y, z=z,
                            strength=strength,
                            cohen_factor=cohen,
                            velocity=0.0
                        ))

        BEVGenerator.create_bev(
            point_data={"all_frames": all_points},
            cfg=cfg,
            id=f"{cfg['data_name']}_{ref_rtk.timeStamp}",
            images=['all'],
            output_folder=cfg['input_tensor_output_folder'],
            xyrange=(cfg['x_range'], cfg['y_range']),
        )
