import math
import logging
from landing.config import INTEGRATED_CONFIG


class EnhancedLandingEvaluator:
    """Score landing quality from a raw simulator state vector.

    Combines four shaped reward terms — position relative to the runway,
    speeds, attitude, and rotational stability — into a single weighted
    total. All tunables come from the ``'LANDING'`` section of the config
    mapping (``'RUNWAY'``, ``'PARAMETERS'``, ``'REWARDS'``).

    NOTE(review): ``_determine_landing_phase``, ``_update_tracking_metrics``,
    ``_calculate_lateral_deviation`` and ``_calculate_longitudinal_position``
    are called below but not defined in this class — presumably supplied by a
    subclass or elsewhere in the package; confirm before instantiating this
    class directly.
    """

    def __init__(self, config=None):
        """Initialize the evaluator.

        Args:
            config: Configuration mapping containing a ``'LANDING'`` section.
                Defaults to the module-level ``INTEGRATED_CONFIG``.
        """
        # None-sentinel instead of a module-level object as the default:
        # the shared config dict is resolved at call time rather than being
        # frozen into the function object at definition time.
        if config is None:
            config = INTEGRATED_CONFIG
        self.config = config
        self.runway_config = config['LANDING']['RUNWAY']
        self.param_config = config['LANDING']['PARAMETERS']
        self.reward_config = config['LANDING']['REWARDS']
        self.logger = logging.getLogger(__name__)
        self.last_state = None      # previous preprocessed state (for deltas)
        self.landing_phase = None   # phase returned by the last evaluation

    def evaluate_landing(self, state_data):
        """Evaluate landing quality and compute the shaped reward.

        Args:
            state_data: Indexable raw state vector; see ``_preprocess_state``
                for the exact indices read.

        Returns:
            Tuple ``(rewards, phase)`` where ``rewards`` is the dict built by
            ``_calculate_landing_rewards`` and ``phase`` is whatever
            ``_determine_landing_phase`` reports.
        """
        current_state = self._preprocess_state(state_data)
        rewards = self._calculate_landing_rewards(current_state)
        phase = self._determine_landing_phase(current_state)

        # Track state-to-state changes once a previous sample exists.
        if self.last_state is not None:
            self._update_tracking_metrics(self.last_state, current_state)

        self.last_state = current_state
        self.landing_phase = phase

        return rewards, phase

    def _preprocess_state(self, state_data):
        """Reshape the flat state vector into a nested dict.

        The index layout below follows the project's state-vector convention
        (assumed to be the simulator's export order — TODO confirm against
        the producer of ``state_data``).
        """
        return {
            'position': {
                'lat': state_data[31],
                'lon': state_data[30],
                'alt': state_data[32]
            },
            'speeds': {
                'ias': state_data[1],
                'ground_speed': state_data[29],
                'vertical_speed': state_data[10]
            },
            'attitude': {
                'pitch': state_data[3],
                'roll': state_data[4],
                'yaw': state_data[2]
            },
            'rates': {
                'pitch_rate': state_data[25],
                'roll_rate': state_data[24],
                'yaw_rate': state_data[26]
            }
        }

    def _calculate_landing_rewards(self, state):
        """Compute the weighted total reward and its per-term components.

        Returns:
            Dict with ``'total'`` (weighted sum) and ``'components'``
            (the four unweighted sub-rewards).
        """
        position_reward = self._calculate_position_reward(state['position'])
        speed_reward = self._calculate_speed_reward(state['speeds'])
        attitude_reward = self._calculate_attitude_reward(state['attitude'])
        stability_reward = self._calculate_stability_reward(state['rates'])

        # Weighted combination; weights come from the REWARDS config section.
        total_reward = (
                position_reward * self.reward_config['POSITION']['WEIGHT'] +
                speed_reward * self.reward_config['SPEED']['WEIGHT'] +
                attitude_reward * self.reward_config['ATTITUDE']['WEIGHT'] +
                stability_reward * self.reward_config['STABILITY']['WEIGHT']
        )

        return {
            'total': total_reward,
            'components': {
                'position': position_reward,
                'speed': speed_reward,
                'attitude': attitude_reward,
                'stability': stability_reward
            }
        }

    def _calculate_position_reward(self, position):
        """Reward proximity to the runway centerline and touchdown zone.

        Returns the mean of a lateral Gaussian term and a zone-gated
        longitudinal Gaussian term.
        """
        # Gaussian falloff on lateral deviation from the centerline.
        lateral_dev = self._calculate_lateral_deviation(position)
        lateral_reward = math.exp(
            -(lateral_dev ** 2) /
            (2 * self.reward_config['POSITION']['LATERAL_SIGMA'] ** 2)
        )

        # Longitudinal term: zero unless inside a configured touchdown zone,
        # in which case the zone's multiplier seeds the reward.
        long_dev = self._calculate_longitudinal_position(position)
        longitudinal_reward = 0

        for zone in self.runway_config['TOUCHDOWN_ZONES']:
            if zone['START'] <= long_dev <= (zone['START'] + zone['LENGTH']):
                longitudinal_reward = zone['REWARD_MULTIPLIER']
                break

        # NOTE(review): the same value is used both as a zone position and as
        # a deviation in the Gaussian below — verify that is intentional.
        longitudinal_reward *= math.exp(
            -(long_dev ** 2) /
            (2 * self.reward_config['POSITION']['LONGITUDINAL_SIGMA'] ** 2)
        )

        return (lateral_reward + longitudinal_reward) / 2

    def _calculate_speed_reward(self, speeds):
        """Reward airspeed/vertical-speed near ideal and low wind shear.

        Weighted 0.4 airspeed + 0.4 vertical speed + 0.2 ground-speed factor;
        each term is 1.0 at the ideal and decays smoothly away from it.
        """
        ideal_speed = self.param_config['SPEED']['IDEAL']
        speed_error = abs(speeds['ias'] - ideal_speed)
        speed_tolerance = self.param_config['SPEED']['TOLERANCE']

        # Gaussian on indicated-airspeed error, sigma = configured tolerance.
        airspeed_reward = math.exp(-(speed_error ** 2) / (2 * speed_tolerance ** 2))

        # Gaussian on vertical-speed error with a fixed sigma of 1 unit.
        vs_error = abs(speeds['vertical_speed'] - self.param_config['VERTICAL_SPEED']['IDEAL'])
        vs_reward = math.exp(-vs_error ** 2 / 2)

        # Penalize ground-speed / airspeed divergence (wind component);
        # the 20 scales the configured factor — TODO confirm its units.
        ground_factor = math.exp(
            -abs(speeds['ground_speed'] - speeds['ias']) /
            (20 * self.reward_config['SPEED']['GROUND_SPEED_FACTOR'])
        )

        return (airspeed_reward * 0.4 + vs_reward * 0.4 + ground_factor * 0.2)

    def _calculate_attitude_reward(self, attitude):
        """Reward landing attitude: pitch near ideal, wings level, runway-aligned."""
        # Gaussian on pitch error, sigma = configured tolerance.
        pitch_error = abs(attitude['pitch'] - self.param_config['ATTITUDE']['PITCH']['IDEAL'])
        pitch_tolerance = self.param_config['ATTITUDE']['PITCH']['TOLERANCE']
        pitch_reward = math.exp(-(pitch_error ** 2) / (2 * pitch_tolerance ** 2))

        # Gaussian on bank angle; sigma is a third of the allowed maximum so
        # the reward is already small as roll approaches MAX.
        roll_error = abs(attitude['roll'])
        max_roll = self.param_config['ATTITUDE']['ROLL']['MAX']
        roll_reward = math.exp(-(roll_error ** 2) / (2 * (max_roll / 3) ** 2))

        # Runway alignment: wrap-around-aware heading error in [0, 180].
        runway_heading = self.runway_config['HEADING']
        heading_error = min(
            abs(attitude['yaw'] - runway_heading),
            360 - abs(attitude['yaw'] - runway_heading)
        )
        heading_reward = math.exp(-(heading_error ** 2) / 200)

        return (
                pitch_reward * self.reward_config['ATTITUDE']['PITCH_FACTOR'] +
                roll_reward * self.reward_config['ATTITUDE']['ROLL_FACTOR'] +
                heading_reward * 0.2
        )

    def _calculate_stability_reward(self, rates):
        """Reward low angular rates: 1.0 when still, decaying toward 0.

        Each axis contributes a quadratic penalty normalized by its configured
        maximum rate; the combined penalty is mapped through ``exp(-p)``.
        """
        max_rates = self.reward_config['STABILITY']['RATES_MAX']

        pitch_penalty = (rates['pitch_rate'] / max_rates['PITCH']) ** 2
        roll_penalty = (rates['roll_rate'] / max_rates['ROLL']) ** 2
        yaw_penalty = (rates['yaw_rate'] / max_rates['YAW']) ** 2

        # exp(-p) with p >= 0 always lies in (0, 1], so the original
        # ``max(-1.0, score)`` clamp could never fire and has been removed.
        return math.exp(-(pitch_penalty + roll_penalty + yaw_penalty))


