# task4_env.py与task4_train.py是巡航阶段飞机 油门变化 ，输出操作杆指令实现飞行高度变化不大于5米
# 打开DCS之后运行task4_train.py开始训练

import numpy as np
import math
import time
import gym
from gym import spaces
from getDCSdata import get_dcs_data
# 导入手柄控制模块
from vjoy_control_new import *
import logging
import csv
import os
import pygetwindow as gw
import pyvjoy
import matplotlib.pyplot as plt


logger = logging.getLogger(__name__)  # module-level logger

# BUGFIX: ensure the log directory exists before attaching a FileHandler,
# otherwise logging.basicConfig raises FileNotFoundError on a fresh checkout.
os.makedirs("results//StableHeight", exist_ok=True)

# Log to both a file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("results//StableHeight//training.log"),
        logging.StreamHandler()
    ]
)

# Altitude-deviation threshold (m) used by the episode-termination check.
HEIGHT_THRESHOLD = 100
# Altitude alert level (m).  NOTE(review): the original comment said "3",
# but the value is 30 -- confirm which is intended.
HEIGHT_ALERT = 30

# Number of steps between throttle changes.
THROTTLE_CHANGE_INTERVAL = 50

height_reward = 0  # module-level placeholder (shadowed by a local in _calculate_reward)
# Disturbance-phase boundaries (in steps) and throttle floor.
THROTTLE_PHASE_END = 100
SPOILER_PHASE_END = 200
FLAP_PHASE_END = 300
MIN_THROTTLE = 0.6  # minimum throttle value

class StableHeightEnv(gym.Env):
    """Gym environment for holding cruise altitude in DCS (target: deviation
    <= 5 m) while the throttle is perturbed; the agent outputs stick pitch
    commands through vJoy.
    """

    def __init__(self):
        # Action space: continuous stick pitch command in [-1, 1].
        self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
        # Observation space: altitude (m), vertical speed (m/s), pitch angle (deg).
        self.observation_space = spaces.Box(
            low=np.array([1000, -50, -90], dtype=np.float32),
            high=np.array([5000, 50, 90], dtype=np.float32),
            shape=(3,),
            dtype=np.float32
        )

        self.initial_height = None   # reference altitude captured at episode start
        self.current_height = None   # latest altitude reading
        # Initial throttle setting.
        self.current_throttle = 0.8
        # Step counter within the current episode.
        self.step_count = 0
        # Episode counter.
        self.episode_count = 0
        self.total_reward = 0
        self.episode_rewards = []        # total reward of each finished episode
        self.pitch_actions = []          # pitch action taken at every step
        self.data_logger = None          # optional external data logger (unused by default)
        self.current_reward_data = None  # last step's reward breakdown
        self.smoothed_pitch = 0
        # BUGFIX: the original first set flight_data = None and then
        # immediately overwrote it with this dict; the dead store was removed.
        self.flight_data = {
            'step': [],
            'altitude_deviation': [],
            'pitch_action': [],
            'rewards': []
        }
        # Spoiler / flap states (False = retracted, True = deployed).
        self.spoiler_state = False
        self.flap_state = False
        # Bring the DCS window to the foreground so vJoy inputs reach it.
        activate_dcs_window()

    def reset(self):
        """Start a new episode: capture the reference altitude from DCS,
        reset per-episode bookkeeping, apply the initial throttle and return
        the initial observation [altitude, vertical speed, pitch angle].
        """
        self.episode_count += 1
        print(f"Starting episode {self.episode_count}")

        # Read the current flight state from DCS.
        data = get_dcs_data()
        self.initial_height = data["LLA_balt"]
        self.current_height = self.initial_height
        print(f"initial_height= {self.initial_height}: current_height = {self.current_height}")

        # Reset per-episode counters.
        self.step_count = 0
        self.total_reward = 0

        self.episode_data = {
            'steps': [],
            'altitude_deviations': [],
            'pitch_actions': [],
            'rewards': []
        }

        # BUGFIX: set_throttle() was originally placed AFTER the return
        # statement and therefore never executed; apply the throttle before
        # returning the observation.
        set_throttle(self.current_throttle)

        initial_state = np.array([
            self.initial_height,
            data["VV"][1],   # vertical speed
            data["pitch_deg"]  # pitch angle
        ], dtype=np.float32)
        return initial_state

    def step(self, action):
        """Apply one pitch command, advance the scripted disturbance schedule
        (throttle kick / spoilers / flaps), and return (state, reward, done, info).

        Parameters:
            action: scalar or 1-element ndarray in [-1, 1] -- stick pitch command.
        Raises:
            ValueError: if `action` is an ndarray with more than one element.
        """
        start_time = time.time()

        self.step_count += 1

        # Scripted disturbances: throttle kick at step 1, spoilers deployed
        # during steps 101-109, flaps deployed during steps 201-209.
        if self.step_count == 1:
            # Small random throttle change at the start of the episode.
            throttle_action = np.random.choice([-0.2, 0.2])
            if throttle_action > 0:
                throttle_up(throttle_action)
            else:
                throttle_down(-throttle_action)
        elif self.step_count == 101:
            airbrake_on_control()
            print("[阶段2] 扰流板已打开")
        elif self.step_count == 110:
            airbrake_off_control()
            print("[阶段2] 扰流板已关闭")
        elif self.step_count == 201:
            flaps_control()
            print("[阶段3] 襟翼已打开")
        elif self.step_count == 210:
            flaps_control()  # flaps_control toggles, so a second call retracts
            print("[阶段3] 襟翼已关闭")

        # Signed altitude error relative to the reference altitude.
        data = get_dcs_data()
        height_error = data["LLA_balt"] - self.initial_height

        # `action` may be a single-element ndarray or a plain scalar.
        if isinstance(action, np.ndarray):
            if action.size == 1:
                pitch_action = float(action.item())
            else:
                raise ValueError("pitch_action 必须是单个数值或单元素数组。")
        else:
            pitch_action = float(action)

        # If the aircraft is high (height_error > 0) push the stick
        # (negative pitch); if low, pull the stick (positive pitch).
        directional_pitch_action = pitch_action * (-1 if height_error > 0 else 1)

        SCALE_FACTOR = 0.1  # stick-deflection scaling factor
        val = directional_pitch_action * SCALE_FACTOR
        print(f"Val: {val}, Type: {type(val)}")

        # NOTE(review): val > 0 maps to pitch_up, otherwise pitch_down.  The
        # original comments here said the opposite of the calls they annotated;
        # confirm the actual direction against vjoy_control_new.
        if val > 0:
            pitch_up(val)
        else:
            pitch_down(abs(val))

        print(f"Action: {action}, Type: {type(action)}")
        print(f"Pitch action: {pitch_action}, Type: {type(pitch_action)}")

        # Record the raw pitch action for later plotting.
        self.pitch_actions.append(pitch_action)

        # Re-read flight data after applying the control.
        data = get_dcs_data()
        self.current_height = data["LLA_balt"]
        print(f"current_height = {self.current_height}")
        print(f"initial_height = {self.initial_height}")

        # Reward and bookkeeping.
        reward = self._calculate_reward(data, pitch_action)
        self.total_reward += reward

        altitude_deviation = abs(self.current_height - self.initial_height)

        print(f"Step {self.step_count}: Reward = {reward}, Current Height Deviation = {altitude_deviation}")

        done = self._is_done(altitude_deviation)

        # Store data for plotting.
        self.episode_data['steps'].append(self.step_count)
        self.episode_data['altitude_deviations'].append(altitude_deviation)
        self.episode_data['pitch_actions'].append(pitch_action)
        self.episode_data['rewards'].append(reward)

        # Save data and plots only when the episode ends.
        if done:
            self.episode_rewards.append(self.total_reward)
            self._generate_plots()

        end_time = time.time()
        print(f"Step execution time: {end_time - start_time} seconds")

        # Reward breakdown for external logging.
        self.current_reward_data = {
            'total_reward': reward,
            'altitude': self.current_height,
            'altitude_deviation': abs(self.current_height - self.initial_height),
            'pitch_action': pitch_action
        }

        # BUGFIX: the original built a throwaway 1-element state array here and
        # immediately overwrote it; only the 3-element observation is returned.
        state = self._extract_state(data)
        return state, reward, done, {}

    def _extract_state(self, data):
        if data is None:
            raise ValueError("Received None data in _extract_state")

        required_keys = ["LLA_balt", "VV", "pitch_deg"]
        for key in required_keys:
            if key not in data:
                raise KeyError(f"Missing '{key}' key in flight data")

        try:
            state = np.array([
                float(data["LLA_balt"]),  # 高度
                float(data["VV"][1]),  # 垂直速度
                float(data["pitch_deg"])  # 俯仰角
            ], dtype=np.float32)
            return state
        except (TypeError, ValueError, IndexError) as e:
            raise ValueError(f"Error converting flight data to float: {str(e)}")

    def _is_done(self, altitude_deviation):
        """Return True when the episode should terminate: the altitude
        deviation exceeds HEIGHT_THRESHOLD, OR the 300-step budget is used up.

        BUGFIX: the original combined the two conditions with `and`, so an
        episode in which the agent kept the deviation small would never end
        (episode rewards and plots were never produced).
        """
        print(f"Absolute altitude deviation: {altitude_deviation}")
        failed = altitude_deviation > HEIGHT_THRESHOLD
        out_of_steps = self.step_count >= 300
        return failed or out_of_steps
    """
    def _write_flight_data(self, data):
        csv_filename = "results//StableHeight_flight_data.csv"
        file_exists = os.path.exists(csv_filename)
        with open(csv_filename, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            if not file_exists:
                # 简化后的表头，只保留关键信息
                writer.writerow([
                    "timestamp", "altitude_deviation", "throttle", "pitch_action"
                ])

            # 只写入必要的数据
            writer.writerow([
                time.time(),  # 时间戳
                altitude_deviation,  # 高度
                #data["pitch_deg"],  # 俯仰角
                #data["VV"][1],  # 垂直速度(VV_Y)
                self.current_throttle,  # 当前油门值
                #self.pitch_actions[-1] if self.pitch_actions else 0.0  # 最新俯仰动作
            ])
    """

    def _calculate_reward(self, data, pitch_action):
        """Compute the shaped step reward.

        Components: altitude-hold term, direction-consistency term (stick
        opposing the altitude error), vertical-speed term, pitch-angle term,
        and a quadratic stick-magnitude penalty.
        """
        current_height = data["LLA_balt"]
        vertical_speed = data["VV"][1]   # vertical speed (m/s)
        pitch_angle = data["pitch_deg"]  # pitch angle (deg)

        print(f"Current Height: {current_height}, Vertical Speed: {vertical_speed}, Pitch Angle: {pitch_angle}")

        # Signed altitude error and its magnitude.
        height_error = current_height - self.initial_height
        altitude_deviation = abs(height_error)

        # Altitude term:
        #   <= 3 m : gentle quadratic decay (100 -> ~90)
        #   3-5 m  : steep linear drop (90 -> 0, slope -45)
        #   > 5 m  : cubic penalty
        if altitude_deviation <= 3:
            height_reward = 100 - (altitude_deviation ** 2) * 1.11
        elif altitude_deviation <= 5:
            height_reward = 90 - 45 * (altitude_deviation - 3)
        else:
            height_reward = -2 * (altitude_deviation ** 3)

        # Direction consistency: positive when the stick opposes the altitude
        # error (push when high, pull when low).
        direction_consistency = -height_error * pitch_action

        # Penalize large stick deflections to discourage jitter.
        action_penalty = 0.1 * (pitch_action ** 2)

        # Vertical-speed term (target 0 m/s).
        vertical_speed_error = abs(vertical_speed - 0)
        if vertical_speed_error < 3:
            vertical_speed_reward = 5 * (1 - vertical_speed_error / 5)
        else:
            vertical_speed_reward = -vertical_speed_error * 50

        # Pitch-angle term (target 0 deg).  NOTE(review): within 1 degree the
        # reward GROWS with the error (20 * error), which looks inverted but is
        # continuous with the next branch -- confirm the intended shaping.
        pitch_angle_error = abs(pitch_angle - 0)
        if pitch_angle_error <= 1:
            pitch_angle_reward = 20 * pitch_angle_error
        elif pitch_angle_error < 3:
            pitch_angle_reward = 20 * (1 - (pitch_angle_error - 1) / 4)
        else:
            pitch_angle_reward = -pitch_angle_error * 30

        # Weighted total.
        reward = (height_reward + 0.5 * direction_consistency +
                  vertical_speed_reward + pitch_angle_reward - action_penalty)
        print("reward:", reward)
        print(f"Reward components: height_reward={height_reward:.1f}, "
              f"direction_consistency={direction_consistency:.1f}, "
              f"vertical_speed_reward={vertical_speed_reward:.1f}, "
              f"pitch_angle_reward={pitch_angle_reward:.1f}")
        return reward


    """
    def _take_action(self, action):
        """"""
        专用俯仰控制方法
        参数:
            action: [-1,1] 的俯仰控制量
            逻辑: 油门变化→高度变化→通过俯仰控制补偿
       """"""
        # 1. 动态控制幅度 (高度偏差越大，控制力度越强)
        height_error = self.current_height - self.initial_height
        control_gain = 0.1 + 0.3 * (min(abs(height_error) / 5, 1))  # 偏差>5米时增益加大
        """"""
        # 2. 应用平滑滤波 (减少震荡)
        self.smoothed_pitch = 0.7 * self.last_pitch + 0.3 * action[0]
        pitch_control = np.clip(self.smoothed_pitch * control_gain, -0.3, 0.3)  # 硬限制幅度
        """"""
        # 3. 执行控制 (双向阈值触发)
        if pitch_control > 0.05:  # 死区过滤微小抖动
            pitch_down(pitch_control)  # 机头向下抑制爬升
        elif pitch_control < -0.05:
            pitch_up(abs(pitch_control))  # 机头向上阻止下降

        self.last_pitch = pitch_control
        """

    def _save_episode_data(self):
        """Write per-step (altitude_deviation, pitch_action) rows for the
        current episode to results//episode_<n>_data.csv.

        NOTE(review): this reads self.data_logger.flight_data, but
        data_logger is initialized to None in __init__ and never assigned in
        this file -- calling this without wiring up a logger would raise
        AttributeError.  Confirm the intended FlightDataLogger integration.
        """
        csv_filename = f"results//episode_{self.episode_count}_data.csv"
        steps = list(range(self.step_count))

        # Keep altitude and pitch_actions the same length as step_count,
        # truncating (with a warning) if they have drifted apart.
        if len(self.data_logger.flight_data['altitude']) != self.step_count:
            logger.warning(
                f"Length of altitude data ({len(self.data_logger.flight_data['altitude'])}) does not match step_count ({self.step_count}). Truncating data.")
            self.data_logger.flight_data['altitude'] = self.data_logger.flight_data['altitude'][:self.step_count]

        if len(self.pitch_actions) != self.step_count:
            logger.warning(
                f"Length of pitch_actions ({len(self.pitch_actions)}) does not match step_count ({self.step_count}). Truncating data.")
            self.pitch_actions = self.pitch_actions[:self.step_count]

        with open(csv_filename, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['step', 'altitude_deviation', 'pitch_action'])
            for i in range(self.step_count):
                writer.writerow([
                    steps[i],
                    abs(self.data_logger.flight_data['altitude'][i] - self.initial_height),
                    self.pitch_actions[i]
                ])

    
    def _plot_altitude_vs_pitch(self):
        """Save a two-panel figure (altitude vs steps, pitch action vs steps)
        to results/altitude_pitch_ep<n>.png.

        NOTE(review): reads self.data_logger.flight_data, but data_logger is
        None by default (see __init__); any failure is caught by the blanket
        except and only printed.
        """
        try:
            steps = list(range(self.step_count))

            plt.figure(figsize=(12, 6))

            # Top panel: absolute altitude per step.
            plt.subplot(2, 1, 1)
            plt.plot(steps, [self.data_logger.flight_data['altitude'][i] for i in steps], 'b-', label='Altitude')
            plt.xlabel('Step')
            plt.ylabel('Altitude (m)')
            plt.title(f'Altitude vs Steps (Episode {self.episode_count})')
            plt.grid(True)

            # Bottom panel: stick pitch command per step.
            plt.subplot(2, 1, 2)
            plt.plot(steps, self.pitch_actions, 'r-', label='Pitch Action')
            plt.xlabel('Step')
            plt.ylabel('Pitch Control')
            plt.title('Pitch Action vs Steps')
            plt.grid(True)

            plt.tight_layout()
            plt.savefig(f'results/altitude_pitch_ep{self.episode_count}.png')
            plt.close()
            print(f"Saved altitude vs pitch plot for episode {self.episode_count}")

        except Exception as e:
            print(f"Error plotting altitude vs pitch: {str(e)}")

    def _plot_reward_vs_episodes(self):
        """Save a line plot of total reward per finished episode to
        results/reward_progression.png; failures are caught and printed."""
        try:
            episode_numbers = list(range(1, len(self.episode_rewards) + 1))

            plt.figure(figsize=(8, 5))
            plt.plot(episode_numbers, self.episode_rewards, 'g-o', linewidth=2, markersize=8)
            plt.xlabel('Episode')
            plt.ylabel('Total Reward')
            plt.title('Reward Progression Across Episodes')
            plt.grid(True)
            plt.savefig('results/reward_progression.png')
            plt.close()

            print("Saved reward vs episodes plot")
        except Exception as e:
            print(f"Error plotting reward progression: {str(e)}")

    
    def _plot_episode_results(self):
        """Save a two-panel summary figure for the episode: (1) altitude
        deviation and pitch action vs steps, (2) total reward per episode.

        NOTE(review): reads self.data_logger.flight_data, but data_logger is
        None by default (see __init__); any failure is caught by the blanket
        except and only printed.
        """
        try:
            # Panel 1: altitude_deviation and pitch_action by step.
            plt.figure(figsize=(12, 5))

            plt.subplot(1, 2, 1)
            steps = list(range(self.step_count))
            altitude_deviations = [abs(alt - self.initial_height) for alt in self.data_logger.flight_data['altitude']]

            plt.plot(steps, altitude_deviations, 'b-', label='Altitude Deviation (m)')
            plt.plot(steps, self.pitch_actions, 'r-', label='Pitch Action')
            plt.xlabel('Step')
            plt.ylabel('Value')
            plt.title(f'Altitude Deviation & Pitch Action (Episode {self.episode_count})')
            plt.legend()
            plt.grid(True)

            # Panel 2: total reward per completed episode.
            plt.subplot(1, 2, 2)
            episodes = list(range(1, self.episode_count + 1))
            plt.plot(episodes, self.episode_rewards, 'g-', marker='o')
            plt.xlabel('Episode')
            plt.ylabel('Total Reward')
            plt.title('Total Reward per Episode')
            plt.grid(True)

            plt.tight_layout()
            plt.savefig(f'results//episode_{self.episode_count}_plots.png')
            plt.close()
            print(f"Saved plots for episode {self.episode_count}")

        except Exception as e:
            print(f"Error generating plots: {str(e)}")
            
    

    def log_data(self, episode, step, state, reward_data=None):
        """Append one step's altitude record to self.flight_data.

        `state[0]` is the altitude; `reward_data` is accepted for interface
        compatibility but unused here.

        NOTE(review): __init__ creates flight_data with DIFFERENT keys
        ('step', 'altitude_deviation', 'pitch_action', 'rewards'), so on a
        fully constructed env this append would raise KeyError -- confirm
        which schema is intended.
        """
        # Lazily create the storage dict on first use.
        if not hasattr(self, 'flight_data'):
            self.flight_data = {'episode': [], 'step': [], 'altitude': [], 'altitude_deviation': []}

        altitude = state[0]
        records = self.flight_data
        records['episode'].append(episode)
        records['step'].append(step)
        records['altitude'].append(altitude)
        records['altitude_deviation'].append(abs(altitude - self.initial_height))
    
    def _save_flight_data(self, step, episode_count, total_reward, pitch_action, altitude_deviation):
        csv_filename = "results//StableHeight_flight_data.csv"
        file_exists = os.path.exists(csv_filename)

        with open(csv_filename, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            if not file_exists:
                writer.writerow(["step", "episode_count", "total_reward", "pitch_action", "altitude_deviation"])

            writer.writerow([
                step,
                episode_count,
                total_reward,
                pitch_action,
                altitude_deviation
            ])

    def _generate_plots(self):
        """Generate and save three key charts, overwriting any old files:
        altitude deviation vs reward, per-episode total reward, and altitude
        deviation vs pitch action.  Failures are caught and printed."""
        try:
            # Nothing to plot if the episode recorded no steps.
            if not self.episode_data['steps']:
                print("No data available for plotting.")
                return

            # Chart 1: altitude deviation and reward over the episode's steps.
            plt.figure(figsize=(10, 5))
            plt.plot(self.episode_data['steps'],
                     self.episode_data['altitude_deviations'],
                     'b-', label='Altitude Deviation (m)')
            plt.plot(self.episode_data['steps'],
                     self.episode_data['rewards'],
                     'g-', label='Reward')
            plt.xlabel('Step')
            plt.ylabel('Value')
            plt.title('Altitude Deviation vs Reward')
            plt.legend()
            plt.grid(True)
            plt.tight_layout()
            plt.savefig("results/altitude_vs_reward.png")
            plt.close()

            # Chart 2: total reward per completed episode (needs >= 2 points
            # to draw a trend).
            plt.figure(figsize=(10, 5))
            if len(self.episode_rewards) > 1:
                plt.plot(range(1, len(self.episode_rewards) + 1),
                         self.episode_rewards,
                         'ro-', linewidth=2)
                plt.xlabel('Episode')
                plt.ylabel('Total Reward')
                plt.title('Training Progress')
                plt.grid(True)
            else:
                plt.text(0.5, 0.5, 'Need more episodes\nto show trend',
                         ha='center', va='center')
            plt.tight_layout()
            plt.savefig("results/StableHeight_training_progress.png")
            plt.close()

            # Chart 3: altitude deviation against the pitch actions taken.
            plt.figure(figsize=(10, 5))
            plt.plot(self.episode_data['steps'],
                     self.episode_data['altitude_deviations'],
                     'b-', label='Altitude Deviation')
            plt.plot(self.episode_data['steps'],
                     self.episode_data['pitch_actions'],
                     'r-', label='Pitch Action')
            plt.xlabel('Step')
            plt.ylabel('Value')
            plt.title('Altitude Deviation vs Pitch Action')
            plt.legend()
            plt.grid(True)
            plt.tight_layout()
            plt.savefig("results/altitude_vs_pitch.png")
            plt.close()

            print("Plots updated: altitude_vs_reward.png, training_progress.png, altitude_vs_pitch.png")

        except Exception as e:
            print(f"Error generating plots: {str(e)}")

    def scale_action(self, raw_action):
        """Saturating rescale: sensitive near zero, asymptotes to +/-1.

        Maps raw_action -> sign(raw_action) * (1 - exp(-2*|raw_action|)).
        """
        magnitude = 1 - np.exp(-2 * abs(raw_action))
        return np.sign(raw_action) * magnitude

