#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

import numpy as np
from typing import List, Tuple, Dict, Any

class Preprocessor:
    """Convert raw game observations into model-ready features and rewards.

    Maintains per-episode state (discovered organs, previous hero position,
    score/treasure tracking) that must be cleared with ``reset()`` between
    episodes.  ``process()`` is the single entry point per step.
    """

    # Local observation window is MAP_SIZE x MAP_SIZE cells, hero at CENTER.
    MAP_SIZE = 51
    CENTER = 25
    # config_id of the level endpoint organ (sub_type 4).
    ENDPOINT_ID = 22

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset all per-episode preprocessor state."""
        # Each registry maps config_id -> (x, z, status).
        self.discovered_treasures = {}
        self.discovered_buffs = {}
        self.discovered_endpoints = {}
        self.discovered_spawns = {}   # never written here; kept for compatibility
        self.previous_hero_pos = None
        self.step_count = 0

        # Reward tracking - simplified
        self.previous_total_score = 0.0
        self.previous_treasure_count = 0
        self.no_movement_count = 0
        self.last_positions = []      # unused here; kept for compatibility
        self._prev_endpoints = set()

    def _calculate_reward(self, hero_pos: Tuple[int, int], score_info: Dict) -> List[float]:
        """Compute the shaped per-step reward and roll tracking state forward.

        Components: scaled score delta, discovery bonuses (+0.5 per newly
        seen treasure, +1.0 the first frame the endpoint is seen), an
        anti-idling movement term, and a constant time penalty.
        Returns a single-element list (the environment's expected shape).
        """
        # Base reward: change in total score since last step, scaled down.
        current_total_score = score_info['total_score']
        base_reward = (current_total_score - self.previous_total_score) / 10.0

        # Discovery bonuses.
        discovery_reward = 0.0
        current_treasure_count = len(self.discovered_treasures)
        newly_seen = current_treasure_count - self.previous_treasure_count
        if newly_seen > 0:
            discovery_reward += 0.5 * newly_seen

        if (self.ENDPOINT_ID in self.discovered_endpoints
                and self.ENDPOINT_ID not in self._prev_endpoints):
            discovery_reward += 1.0

        # Movement shaping: escalating penalty (capped at 3 consecutive
        # idle steps) for standing still, tiny bonus for moving.
        movement_reward = 0.0
        if self.previous_hero_pos is not None:
            if hero_pos == self.previous_hero_pos:
                self.no_movement_count += 1
                movement_reward -= 0.05 * min(self.no_movement_count, 3)
            else:
                self.no_movement_count = 0
                movement_reward += 0.005

        # Constant per-step cost to encourage finishing quickly.
        time_penalty = -0.001

        total_reward = base_reward + discovery_reward + movement_reward + time_penalty

        # Update tracking state used for the next step's deltas.
        self.previous_total_score = current_total_score
        self.previous_treasure_count = current_treasure_count
        self._prev_endpoints = set(self.discovered_endpoints.keys())

        return [total_reward]

    def process(self, obs_data: List[Any], last_action: int) -> Tuple[np.ndarray, np.ndarray, List[float]]:
        """Process one observation into (feature_vec, legal_actions, reward_list).

        ``obs_data`` is a pair ``[obs, extra_info]``; the extra info is
        currently unused.  ``last_action`` is the previous action index
        (negative when there is none).
        """
        obs, _extra_info = obs_data

        # Pull out the observation sections we consume.
        frame_state = obs['frame_state']
        score_info = obs['score_info']
        map_info = obs['map_info']
        legal_act = obs['legal_act']

        self.step_count = frame_state['step_no']

        # Hero position in global map coordinates.
        hero = frame_state['heroes'][0]
        hero_pos = (hero['pos']['x'], hero['pos']['z'])

        # Remember every organ seen this frame before building features.
        self._update_discovered_objects(frame_state['organs'])

        feature_vec = self._build_feature_vector_fast(
            hero, map_info, score_info, last_action, hero_pos
        )

        # 16 actions: 8 move directions + 8 talent-move directions, masked
        # by the two legality groups supplied by the environment.
        legal_actions = np.zeros(16, dtype=bool)
        legal_actions[:8] = legal_act[0]
        legal_actions[8:] = legal_act[1]

        reward_list = self._calculate_reward(hero_pos, score_info)

        self.previous_hero_pos = hero_pos

        return feature_vec, legal_actions, reward_list

    def _update_discovered_objects(self, organs: List[Dict]):
        """Record (x, z, status) for every organ seen this frame."""
        # Dispatch on sub_type instead of an if/elif chain.
        registries = {
            1: self.discovered_treasures,   # treasure
            2: self.discovered_buffs,       # speed buff
            4: self.discovered_endpoints,   # endpoint
        }
        for organ in organs:
            registry = registries.get(organ['sub_type'])
            if registry is not None:
                registry[organ['config_id']] = (
                    organ['pos']['x'], organ['pos']['z'], organ['status']
                )

    def _stamp_disk(self, channel: np.ndarray, row: int, col: int,
                    radius: int, value: float) -> None:
        """Set ``channel`` to ``value`` on the disk of ``radius`` around
        (row, col), clipped to the map bounds.

        Vectorized replacement for the previous per-cell nested loops;
        membership test is ``dx*dx + dy*dy <= radius*radius``, identical
        to the original condition.
        """
        size = channel.shape[0]
        r0, r1 = max(0, row - radius), min(size, row + radius + 1)
        c0, c1 = max(0, col - radius), min(size, col + radius + 1)
        if r0 >= r1 or c0 >= c1:
            return  # disk entirely off-map
        dy = np.arange(r0, r1) - row
        dx = np.arange(c0, c1) - col
        inside = dy[:, None] ** 2 + dx[None, :] ** 2 <= radius * radius
        # Assigning through the boolean mask writes into the parent array.
        channel[r0:r1, c0:c1][inside] = value

    def _process_local_map_fast(self, local_map: np.ndarray, hero_x: int, hero_z: int) -> np.ndarray:
        """Build the 51x51x2 spatial tensor.

        Channel 0: walkability (cells whose map value is 1).
        Channel 1: objective highlights — radius-3 disks (0.8) on available
        treasures, then a radius-4 disk (1.0) on the endpoint, drawn last so
        it wins on overlap.
        """
        processed = np.zeros((self.MAP_SIZE, self.MAP_SIZE, 2), dtype=np.float32)

        # Channel 0: basic walkability.
        processed[:, :, 0] = (local_map == 1).astype(np.float32)

        # Channel 1: treasures first...
        for tx, tz, status in self.discovered_treasures.values():
            if status != 1:  # only highlight still-available treasures
                continue
            local_tx = tx - hero_x + self.CENTER
            local_tz = tz - hero_z + self.CENTER
            if 0 <= local_tx < self.MAP_SIZE and 0 <= local_tz < self.MAP_SIZE:
                self._stamp_disk(processed[:, :, 1], local_tz, local_tx, 3, 0.8)

        # ...then the endpoint on top.
        if self.ENDPOINT_ID in self.discovered_endpoints:
            ex, ez, _ = self.discovered_endpoints[self.ENDPOINT_ID]
            local_ex = ex - hero_x + self.CENTER
            local_ez = ez - hero_z + self.CENTER
            if 0 <= local_ex < self.MAP_SIZE and 0 <= local_ez < self.MAP_SIZE:
                self._stamp_disk(processed[:, :, 1], local_ez, local_ex, 4, 1.0)

        return processed

    def _get_nearest_treasures_fast(self, hero_pos: Tuple[int, int]) -> List[float]:
        """Features for the 3 nearest *available* treasures.

        Each slot contributes (dx/127, dz/127, available, discovered);
        unused slots are padded with (0, 0, -1, 0).  Always 12 floats.
        """
        hero_x, hero_z = hero_pos

        # Rank by squared distance (sqrt is unnecessary for ordering).
        candidates = sorted(
            ((tx - hero_x) ** 2 + (tz - hero_z) ** 2, tx, tz)
            for tx, tz, status in self.discovered_treasures.values()
            if status == 1
        )

        features: List[float] = []
        for _, tx, tz in candidates[:3]:
            features.extend([
                (tx - hero_x) / 127.0,
                (tz - hero_z) / 127.0,
                1.0,  # available
                1.0,  # discovered
            ])
        # Pad missing slots so the layout is always 3 * 4 floats.
        features.extend([0.0, 0.0, -1.0, 0.0] * (3 - min(3, len(candidates))))
        return features

    def _build_feature_vector_fast(self, hero: Dict, map_info: List[Dict],
                                 score_info: Dict, last_action: int, hero_pos: Tuple[int, int]) -> np.ndarray:
        """Assemble the flat float32 feature vector.

        Layout: 51*51*2 = 5202 spatial features, then 27 scalars
        (6 hero + 12 treasure + 4 endpoint + 3 score/progress + 2 movement).
        """
        hero_x, hero_z = hero_pos

        # 1. Spatial features.  map_info rows carry a 'values' sequence;
        # take the first 51 entries of each of the 51 rows.
        local_map = np.array(
            [[row['values'][i] for i in range(self.MAP_SIZE)] for row in map_info],
            dtype=np.uint8,
        )
        spatial_features = self._process_local_map_fast(local_map, hero_x, hero_z).ravel()

        # 2. Essential scalar features only.
        non_spatial_features = [
            # Hero basics (6)
            hero_x / 127.0,
            hero_z / 127.0,
            hero['talent']['status'],
            hero['talent']['cooldown'] / 100.0,
            hero['speed_up'],
            hero['buff_remain_time'] / 51.0,

            # Top 3 treasures (3*4=12)
            *self._get_nearest_treasures_fast(hero_pos),

            # Endpoint (4)
            *self._get_endpoint_features_fast(hero_pos),

            # Score / progress (3)
            score_info['total_score'] / 1000.0,
            score_info['treasure_collected_count'] / 13.0,
            self.step_count / 300.0,

            # Movement (2): moved-last-step flag and normalized last action.
            1.0 if self.previous_hero_pos and hero_pos != self.previous_hero_pos else 0.0,
            last_action / 15.0 if last_action >= 0 else 0.0,
        ]

        return np.concatenate([spatial_features, non_spatial_features]).astype(np.float32)

    def _get_endpoint_features_fast(self, hero_pos: Tuple[int, int]) -> List[float]:
        """Endpoint features: (dx/127, dz/127, status, discovered-flag)."""
        hero_x, hero_z = hero_pos

        endpoint = self.discovered_endpoints.get(self.ENDPOINT_ID)
        if endpoint is None:
            # Not discovered yet: neutral offsets, status sentinel -1.
            return [0.0, 0.0, -1.0, 0.0]
        ex, ez, status = endpoint
        return [
            (ex - hero_x) / 127.0,
            (ez - hero_z) / 127.0,
            status,
            1.0,
        ]