import numpy as np
from typing import List, Tuple, Dict, Any

class Preprocessor:
    """Convert raw environment observations into network inputs and rewards.

    Produces (feature_vec, legal_actions, reward_list) per step for a
    treasure-hunt navigation agent.  The preprocessor is stateful within an
    episode (global map memory, discovered-object registry, reward baselines);
    call :meth:`reset` at the start of every episode.
    """

    # Environment constants as used throughout this class.
    # NOTE(review): values inferred from the code (127x127 grid, 51x51 local
    # view, treasure ids 1-13, endpoint config id 22, buff id 0) — confirm
    # against the environment specification.
    MAP_SIZE = 127      # global map side length, in cells
    LOCAL_VIEW = 51     # side length of the local observation window
    NUM_TREASURES = 13  # treasure config ids are assumed to be 1..13
    ENDPOINT_ID = 22    # config id of the endpoint organ
    BUFF_ID = 0         # config id of the speed-buff organ

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset all per-episode state (map memory, discoveries, baselines)."""
        # config_id -> (x, z, status) for each object type seen so far.
        self.discovered_treasures = {}
        self.discovered_buffs = {}
        self.discovered_endpoints = {}
        self.discovered_spawns = {}
        self.previous_hero_pos = None
        # Global map memory accumulated from local views; 0 == unexplored.
        self.map_memory = np.zeros((self.MAP_SIZE, self.MAP_SIZE), dtype=np.float32)
        self.step_count = 0

        # Baselines used by _calculate_reward to compute per-step deltas.
        self.previous_total_score = 0.0
        self.previous_treasure_count = 0
        self.previous_explored_cells = 0
        self.previous_min_treasure_dist = float('inf')
        self.previous_endpoint_dist = float('inf')
        self.discovered_treasure_ids = set()
        self.no_movement_count = 0
        self.last_positions = []  # recent positions, for loop detection
        # BUGFIX: this was previously only assigned lazily at the end of
        # _calculate_reward (and read via getattr), so a stale value survived
        # reset() and suppressed the endpoint-discovery bonus in every
        # episode after the first.
        self._prev_endpoints = set()

    def _calculate_reward(self, hero_pos: Tuple[int, int], score_info: Dict) -> List[float]:
        """Compute the shaped reward for the current step.

        Combines the raw score delta with discovery / progress / exploration
        bonuses and efficiency / time penalties.  Updates the per-step
        tracking baselines as a side effect.

        Args:
            hero_pos: Current hero (x, z) grid position.
            score_info: Score dict from the observation; must contain
                'total_score'.

        Returns:
            Single-element list holding the total shaped reward.
        """
        hero_x, hero_z = hero_pos

        # === 1. Base reward: raw score delta, scaled into a small range. ===
        current_total_score = score_info['total_score']
        base_reward = (current_total_score - self.previous_total_score) / 100.0

        # === 2. Discovery bonuses. ===
        discovery_reward = 0.0
        current_treasure_count = len(self.discovered_treasures)
        if current_treasure_count > self.previous_treasure_count:
            discovery_reward += 0.1 * (current_treasure_count - self.previous_treasure_count)
        # One-shot bonus the step the endpoint first becomes known.
        if (self.ENDPOINT_ID in self.discovered_endpoints
                and self.ENDPOINT_ID not in self._prev_endpoints):
            discovery_reward += 0.2

        # === 3. Progress toward nearest available treasure and endpoint. ===
        progress_reward = 0.0
        available = [v for v in self.discovered_treasures.values()
                     if v[2] == 1]  # status == 1 means still collectable
        if available:
            min_treasure_dist = min(
                np.sqrt((hero_x - tx) ** 2 + (hero_z - tz) ** 2)
                for tx, tz, _ in available
            )
            if self.previous_min_treasure_dist != float('inf'):
                progress_reward += 0.001 * (self.previous_min_treasure_dist - min_treasure_dist)
            self.previous_min_treasure_dist = min_treasure_dist

        if self.ENDPOINT_ID in self.discovered_endpoints:
            end_x, end_z, _ = self.discovered_endpoints[self.ENDPOINT_ID]
            endpoint_dist = np.sqrt((hero_x - end_x) ** 2 + (hero_z - end_z) ** 2)
            if self.previous_endpoint_dist != float('inf'):
                # Endpoint progress is weighted higher than treasure progress.
                progress_reward += 0.002 * (self.previous_endpoint_dist - endpoint_dist)
            self.previous_endpoint_dist = endpoint_dist

        # === 4. Exploration bonus for newly revealed map cells. ===
        exploration_reward = 0.0
        current_explored = np.count_nonzero(self.map_memory)
        new_cells = current_explored - self.previous_explored_cells
        if new_cells > 0:
            exploration_reward += 0.001 * new_cells

        # === 5. Efficiency penalties. ===
        efficiency_penalty = 0.0
        # Standing still: escalating penalty per consecutive idle step.
        if self.previous_hero_pos is not None:
            if hero_pos == self.previous_hero_pos:
                self.no_movement_count += 1
                efficiency_penalty -= 0.01 * self.no_movement_count
            else:
                self.no_movement_count = 0

        # Loop detection over a sliding window of the last 10 positions.
        self.last_positions.append(hero_pos)
        if len(self.last_positions) > 10:
            self.last_positions.pop(0)
        if len(self.last_positions) >= 6:
            # <= 3 unique cells in the last 6 steps looks like pacing in place.
            if len(set(self.last_positions[-6:])) <= 3:
                efficiency_penalty -= 0.005

        # === 6. Constant per-step time pressure. ===
        time_penalty = -0.001

        # === 7. Flash-usage encouragement. ===
        # Tiny bonus when far (> 20 cells) from every known objective,
        # nudging the policy toward spending flash to close the gap.
        skill_reward = 0.0
        if self.discovered_endpoints or self.discovered_treasures:
            min_obj_dist = float('inf')
            if self.ENDPOINT_ID in self.discovered_endpoints:
                end_x, end_z, _ = self.discovered_endpoints[self.ENDPOINT_ID]
                min_obj_dist = min(
                    min_obj_dist,
                    np.sqrt((hero_x - end_x) ** 2 + (hero_z - end_z) ** 2),
                )
            for tx, tz, status in self.discovered_treasures.values():
                if status == 1:  # only treasures that are still collectable
                    min_obj_dist = min(
                        min_obj_dist,
                        np.sqrt((hero_x - tx) ** 2 + (hero_z - tz) ** 2),
                    )
            # BUGFIX: require a finite distance.  Previously `inf > 20` was
            # True, so the bonus fired even when no reachable objective was
            # known (e.g. only collected treasures discovered).
            if 20 < min_obj_dist < float('inf'):
                skill_reward += 0.001

        total_reward = (
            base_reward            # score delta (main signal)
            + discovery_reward     # discovery bonuses
            + progress_reward      # progress toward objectives
            + exploration_reward   # exploration bonus
            + efficiency_penalty   # idle / loop penalties
            + time_penalty         # time pressure
            + skill_reward         # flash encouragement
        )

        # Update baselines for the next step.
        self.previous_total_score = current_total_score
        self.previous_treasure_count = current_treasure_count
        self.previous_explored_cells = current_explored
        self._prev_endpoints = set(self.discovered_endpoints.keys())

        return [total_reward]

    def process(self, obs_data: List[Any], last_action: int) -> Tuple[np.ndarray, np.ndarray, List[float]]:
        """Process one observation into network inputs and reward.

        Args:
            obs_data: [obs, extra_info]; only obs is used here.
            last_action: Previous action id, or -1 on the first step.

        Returns:
            feature_vec: float32 feature vector (see _build_feature_vector).
            legal_actions: length-16 boolean mask (0-7 move, 8-15 flash).
            reward_list: single-element list with the shaped reward.
        """
        obs, _extra_info = obs_data

        frame_state = obs['frame_state']
        score_info = obs['score_info']
        map_info = obs['map_info']
        legal_act = obs['legal_act']

        self.step_count = frame_state['step_no']

        # Single-hero environment: only the first hero is tracked.
        hero = frame_state['heroes'][0]
        hero_pos = (hero['pos']['x'], hero['pos']['z'])
        hero_x, hero_z = hero_pos

        # Fold the current local view into global memory, then record any
        # organs visible this frame, before featurizing.
        self._update_map_memory(map_info, hero_x, hero_z)
        self._update_discovered_objects(frame_state['organs'])

        feature_vec = self._build_feature_vector(
            hero, map_info, score_info, last_action, hero_pos
        )

        # legal_act[0] covers move actions 0-7; legal_act[1] flash actions 8-15.
        legal_actions = np.zeros(16, dtype=bool)
        legal_actions[:8] = legal_act[0]
        legal_actions[8:16] = legal_act[1]

        reward_list = self._calculate_reward(hero_pos, score_info)

        self.previous_hero_pos = hero_pos
        # BUGFIX: removed stray debug print of the full feature vector, which
        # flooded stdout on every step.
        return feature_vec, legal_actions, reward_list

    def _update_map_memory(self, map_info: List[Dict], hero_x: int, hero_z: int):
        """Stamp the current LOCAL_VIEW x LOCAL_VIEW observation into map_memory.

        The local window is centred on the hero; near map edges only the
        overlapping region is copied.
        """
        size = self.LOCAL_VIEW
        half = size // 2

        # Global-coordinate bounds of the visible window, clipped to the map.
        start_x = max(0, hero_x - half)
        end_x = min(self.MAP_SIZE, hero_x + half + 1)
        start_z = max(0, hero_z - half)
        end_z = min(self.MAP_SIZE, hero_z + half + 1)

        # NOTE(review): rows are assumed z-ordered with 'values' holding one
        # x-row of the local window — confirm against the observation spec.
        local_map = np.array([row['values'][:size] for row in map_info])

        # Matching bounds inside the local window (handles edge clipping so
        # both slices have identical shape).
        local_start_x = max(0, half - hero_x)
        local_end_x = min(size, size - (hero_x + half + 1 - self.MAP_SIZE))
        local_start_z = max(0, half - hero_z)
        local_end_z = min(size, size - (hero_z + half + 1 - self.MAP_SIZE))

        self.map_memory[start_z:end_z, start_x:end_x] = \
            local_map[local_start_z:local_end_z, local_start_x:local_end_x]

    def _update_discovered_objects(self, organs: List[Dict]):
        """Record the exact (x, z, status) of every organ visible this frame."""
        # sub_type -> destination registry; unknown sub_types are ignored.
        registries = {
            1: self.discovered_treasures,  # treasure
            2: self.discovered_buffs,      # speed buff
            3: self.discovered_spawns,     # spawn point
            4: self.discovered_endpoints,  # endpoint
        }
        for organ in organs:
            registry = registries.get(organ['sub_type'])
            if registry is not None:
                registry[organ['config_id']] = (
                    organ['pos']['x'], organ['pos']['z'], organ['status']
                )

    def _build_feature_vector(self, hero: Dict, map_info: List[Dict],
                            score_info: Dict, last_action: int, hero_pos: Tuple[int, int]) -> np.ndarray:
        """Build the 78-dim feature vector.

        Layout: 6 hero-state + 52 treasure (13 x 4) + 4 endpoint + 4 buff
        + 5 score/progress + 3 movement-history + 4 exploration features.
        """
        features = []
        hero_x, hero_z = hero_pos

        # 1. Hero state (6): normalized position, flash status/cooldown,
        #    speed-buff status and remaining time.
        features.extend([
            hero_x / 127.0,
            hero_z / 127.0,
            hero['talent']['status'],
            hero['talent']['cooldown'] / 100.0,  # assumes max cooldown ~100 — confirm
            hero['speed_up'],
            hero['buff_remain_time'] / 51.0,
        ])

        # 2. Treasures (13 x 4): relative position, status, discovered flag.
        #    Undiscovered slots encode as (0, 0, -1, 0).
        for tid in range(1, self.NUM_TREASURES + 1):
            if tid in self.discovered_treasures:
                tx, tz, status = self.discovered_treasures[tid]
                features.extend([
                    (tx - hero_x) / 127.0,
                    (tz - hero_z) / 127.0,
                    status,  # 1 = available, 0 = collected
                    1.0,     # discovered flag
                ])
            else:
                features.extend([0.0, 0.0, -1.0, 0.0])

        # 3. Endpoint (4): same encoding as a treasure slot.
        if self.ENDPOINT_ID in self.discovered_endpoints:
            ex, ez, status = self.discovered_endpoints[self.ENDPOINT_ID]
            features.extend([(ex - hero_x) / 127.0, (ez - hero_z) / 127.0, status, 1.0])
        else:
            features.extend([0.0, 0.0, -1.0, 0.0])

        # 4. Speed buff (4): same encoding.
        if self.BUFF_ID in self.discovered_buffs:
            bx, bz, status = self.discovered_buffs[self.BUFF_ID]
            features.extend([(bx - hero_x) / 127.0, (bz - hero_z) / 127.0, status, 1.0])
        else:
            features.extend([0.0, 0.0, -1.0, 0.0])

        # 5. Score and progress (5).
        features.extend([
            score_info['total_score'] / 1000.0,
            score_info['treasure_collected_count'] / 13.0,
            score_info['treasure_score'] / 1300.0,
            score_info['buff_count'] / 10.0,
            self.step_count / 300.0,  # assumes ~300-step episodes — confirm
        ])

        # 6. Movement history (3): position delta and normalized last action.
        if self.previous_hero_pos is not None:
            prev_x, prev_z = self.previous_hero_pos
            features.extend([
                (hero_x - prev_x) / 127.0,
                (hero_z - prev_z) / 127.0,
                last_action / 15.0 if last_action >= 0 else 0.0,
            ])
        else:
            features.extend([0.0, 0.0, 0.0])

        # 7. Exploration (4): coverage ratios and discovery flags derived
        #    from the accumulated map memory.
        total_cells = self.MAP_SIZE * self.MAP_SIZE
        explored_cells = np.count_nonzero(self.map_memory)
        walkable_cells = np.count_nonzero(self.map_memory == 1)
        features.extend([
            explored_cells / total_cells,
            walkable_cells / max(explored_cells, 1),
            len(self.discovered_treasures) / 13.0,
            1.0 if self.ENDPOINT_ID in self.discovered_endpoints else 0.0,
        ])

        return np.array(features, dtype=np.float32)