import torch

from typing import Any, Dict, List
from data_process.structure import RadarPoint, RtkData
from utils.coordinate_transformer import CoordinateTransformer


class FrameSelector:
    """Stateless helpers for filtering radar points and selecting frame sequences.

    All methods are static; the class serves as a namespace. Frame keys are
    strings that index both ``radar_data`` and ``rtk_data``; chronological
    order is derived from ``RtkData.timeStamp``.
    """

    @staticmethod
    def filter_radar_points(
        radar_points: List[RadarPoint],
        y_min: float = 5,
        y_max: float = 50,
    ) -> List[RadarPoint]:
        """Return only the points whose ``y`` coordinate lies in [y_min, y_max].

        Args:
            radar_points: points to filter.
            y_min: inclusive lower bound on ``point.y`` (default 5 — the
                original hard-coded window; units are presumably meters,
                TODO confirm).
            y_max: inclusive upper bound on ``point.y`` (default 50).

        Returns:
            A new list; the input list is not mutated.
        """
        return [p for p in radar_points if y_min <= p.y <= y_max]

    @staticmethod
    def select_frame_and_points(
        radar_data: Dict[str, List[RadarPoint]],
        rtk_data: Dict[str, RtkData],
        interval: int = 1,
    ) -> Dict[str, List[RadarPoint]]:
        """Return filtered radar points for every frame, in timestamp order.

        Args:
            radar_data: frame key -> radar points. Every key present in
                ``rtk_data`` must also exist here (a missing key raises
                ``KeyError``, matching the original behavior).
            rtk_data: frame key -> RTK fix; ``timeStamp`` defines ordering.
            interval: currently unused — frame subsampling was disabled;
                the parameter is kept only for backward compatibility.

        Returns:
            Dict mapping each frame key (in chronological order, which is
            also insertion order of the result) to its filtered points.
        """
        # Iterating the dict directly yields its keys; sort them by RTK time.
        ordered_keys = sorted(rtk_data, key=lambda k: rtk_data[k].timeStamp)
        return {
            key: FrameSelector.filter_radar_points(radar_data[key])
            for key in ordered_keys
        }

    @staticmethod
    def select_sequences(rtk_data: Dict[str, RtkData], cfg: Dict[str, Any]) -> List[List[str]]:
        """Build frame-key sequences whose anchors are spaced by a minimum distance.

        Required ``cfg`` entries:
            ``min_frame_distance``: minimum ECEF distance (same units as the
                coordinate transformer's output — presumably meters, TODO
                confirm) between the anchor frames of consecutive sequences.
            ``num_frame``: number of frames per sequence.
            ``frame_interval``: index stride between frames within a sequence.

        Each sequence contains ``num_frame`` keys: an anchor ("main") frame
        followed by earlier frames stepped back by ``frame_interval``, i.e.
        newest-first within a sequence.

        Returns:
            List of sequences, or ``[]`` when there are fewer frames than one
            sequence span plus a 20-frame safety margin.

        Raises:
            KeyError: if a required ``cfg`` entry is missing.
        """
        # Fail fast with KeyError on missing config rather than the obscure
        # TypeError that arithmetic on None (from cfg.get) would raise later.
        min_frame_distance = cfg["min_frame_distance"]
        num_frame = cfg["num_frame"]
        frame_interval = cfg["frame_interval"]

        frames = sorted(rtk_data, key=lambda k: rtk_data[k].timeStamp)

        # Number of indices one sequence spans, inclusive of both ends.
        frame_len = (num_frame - 1) * frame_interval + 1
        if len(frames) < frame_len + 20:
            return []

        sequences: List[List[str]] = []
        current_main_index = frame_len + 20

        while current_main_index < len(frames):
            # Anchor frame plus (num_frame - 1) earlier frames, newest first.
            sequences.append(
                [frames[current_main_index - frame_interval * j] for j in range(num_frame)]
            )

            # Scan forward for the first frame far enough (in ECEF distance)
            # from the current anchor to start the next sequence.
            found = False
            next_main_index = current_main_index + num_frame * frame_interval - 1
            while next_main_index < len(frames):
                if FrameSelector._validate_distance(
                    rtk_data[frames[current_main_index]],
                    rtk_data[frames[next_main_index]],
                    min_frame_distance,
                ):
                    current_main_index = next_main_index
                    found = True
                    break
                next_main_index += 1

            if not found:
                break

        return sequences

    @staticmethod
    def _validate_distance(prev_rtk: RtkData, candidate_rtk: RtkData, min_distance: float) -> bool:
        """Return True when the two RTK fixes are at least ``min_distance`` apart.

        Distance is the Euclidean norm between the two ECEF positions
        returned by the coordinate transformer (torch tensors — units are
        presumably meters; verify against ``_latlonh_to_ecef``).
        """
        ecef_prev = CoordinateTransformer._latlonh_to_ecef(
            prev_rtk.latitude, prev_rtk.longitude, prev_rtk.height
        )
        ecef_cand = CoordinateTransformer._latlonh_to_ecef(
            candidate_rtk.latitude, candidate_rtk.longitude, candidate_rtk.height
        )
        return torch.norm(ecef_prev - ecef_cand).item() >= min_distance