# takeoff_stabilize_env.py

# Task: stability-augmented takeoff control environment starting from an untrimmed state

import gym
from gym import spaces
import numpy as np
import csv
import os
import time
import logging
import matplotlib.pyplot as plt

# 转换为摇杆控制
from getDCSdata import get_dcs_data
from vjoy_control_new import (
    set_pitch, set_roll, set_throttle,
    flaps_control, airbrake_on_control,
    activate_dcs_window, reset as dcs_reset
)

# ========= Configuration =========

INIT_ALT = 18.0  # initial barometric altitude (metres)
MIN_TAKEOFF_SPEED = 55.0  # minimum safe takeoff speed (m/s)
CSV_LOG = "results/training_log.csv"  # per-step training log file
TRAJ_LOG = "results/trajectory.csv"  # flight-trajectory record file
TRAJ_PLOT = "results/trajectory_plot.png"  # path where the trajectory plot is saved

# Create the results directory (no-op if it already exists)

os.makedirs("results", exist_ok=True)

# ========= Logging configuration =========

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[
        logging.FileHandler("results/env_debug.log"),  # log file
        logging.StreamHandler()  # console output
    ]
)
logger = logging.getLogger(__name__)


# ========= Environment class =========

class TakeoffStabilizeEnv(gym.Env):
    """
    Custom Gym environment for a reinforcement-learning takeoff task.

    The agent starts from an untrimmed (perturbed) initial state and must
    take the aircraft off while keeping its attitude stable. Telemetry is
    read from DCS via ``get_dcs_data``; actions are applied through the
    vJoy control helpers.
    """

    def __init__(self):
        super().__init__()

        # Observation space: 21-dimensional continuous vector (see _extract_state).
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(21,), dtype=np.float32)

        # Action space: pitch, roll, airbrake, throttle, flaps (5 continuous dims in [-1, 1]).
        self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(5,), dtype=np.float32)

        # Episode bookkeeping.
        self.episode = 0
        self.step_count = 0
        self.prev_action = np.zeros(5)  # last applied action, echoed into the observation
        self.is_on_ground = True
        self.initial_alt = INIT_ALT
        self.initial_heading = 0
        self.trajectory = []  # (lon, lat, alt) samples of the current episode

        self._init_csv()  # create/overwrite the CSV log files

    def _init_csv(self):
        """Create the training-log and trajectory CSV files with header rows."""
        with open(CSV_LOG, "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow([
                "Episode", "Step", "Reward", "Pitch", "Bank", "Yaw_Error",
                "RPM", "Flaps", "Altitude", "IAS", "Done"
            ])
        with open(TRAJ_LOG, "w", newline="") as f:
            writer = csv.writer(f)
            writer.writerow(["Lon", "Lat", "Alt"])

    def reset(self):
        """Reset the simulator and episode bookkeeping; return the initial state."""
        self.episode += 1
        self.step_count = 0
        self.prev_action = np.zeros(5)
        self.is_on_ground = True
        self.trajectory = []

        try:
            activate_dcs_window()  # bring the DCS window to the foreground
        except Exception as e:
            logger.warning(f"激活窗口失败: {e}")

        dcs_reset()
        time.sleep(1.0)  # give the sim time to settle after reset

        data = get_dcs_data()
        self.initial_heading = data.get("heading_deg", 0)
        self.initial_alt = data.get("LLA_balt", INIT_ALT)

        # NOTE(review): this perturbs only the local telemetry dict (i.e. the
        # observed state), not the simulator itself — confirm this is the
        # intended way to emulate an untrimmed initial condition.
        self._apply_initial_perturbation(data)

        self.state = self._extract_state(data)

        # Apply the zero action so the aircraft controls match prev_action.
        self._take_action(self.prev_action)
        return self.state

    def _apply_initial_perturbation(self, data):
        """Perturb attitude entries of *data* in place to break the trimmed state."""
        data["pitch_deg"] += np.random.uniform(-10, 10)
        data["yaw_deg"] += np.random.choice([-10, 10])
        data["Omega_X"] = np.random.uniform(-0.3, 0.3)
        data["Omega_Y"] = np.random.uniform(-0.3, 0.3)

    def step(self, action):
        """Apply *action*, advance the sim, and return (state, reward, done, info)."""
        self._take_action(action)
        time.sleep(0.1)  # limit the control-update rate

        data = get_dcs_data()
        state = self._extract_state(data)
        reward, done = self._calculate_reward(data, action)
        # BUGFIX: _is_done was previously dead code, so episodes never
        # terminated on a crash, a dangerous attitude, or the step limit.
        # Merge both termination signals.
        done = done or self._is_done(state, data)

        self._log_step(reward, data, done)
        self.state = state
        self.step_count += 1
        return state, reward, done, {}

    def _extract_state(self, data):
        """Build the 21-dim observation vector from a DCS telemetry dict."""
        # Heading error relative to the initial heading, folded into [0, 180].
        yaw_error = np.abs(data.get("heading_deg", 0) - self.initial_heading) % 360
        if yaw_error > 180:
            yaw_error = 360 - yaw_error

        lon = data.get("LLA_long", 0.0)
        lat = data.get("LLA_lat", 0.0)
        alt = data.get("LLA_balt", INIT_ALT)

        self.trajectory.append((lon, lat, alt))  # record the flight path

        # NOTE(review): Omega_X/Omega_Y are labelled roll/pitch rate here but
        # pitch/roll rate in _calculate_reward — confirm the DCS axis mapping.
        return np.array([
            data.get("IAS", 0),  # 0 indicated airspeed
            data.get("pitch_deg", 0),  # 1 pitch angle
            data.get("bank_deg", 0),  # 2 bank angle
            data.get("AoA", 0),  # 3 angle of attack
            data.get("VV_0", 0),  # 4 vertical speed
            float(data.get("Gears", 1.0)),  # 5 landing-gear state
            float(data.get("Flaps", 0.0)),  # 6 flaps state
            float(data.get("AirBrake", 0.0)),  # 7 airbrake state
            data.get("Omega_X", 0),  # 8 roll rate
            data.get("Omega_Y", 0),  # 9 pitch rate
            data.get("Omega_Z", 0),  # 10 yaw rate
            data.get("RPM", [0.0])[0] if isinstance(data.get("RPM"), list) else data.get("RPM", 0.0),  # 11 engine RPM
            alt,  # 12 current altitude
            lat,  # 13 latitude
            lon,  # 14 longitude
            yaw_error,  # 15 heading error
            *self.prev_action  # 16-20 previous action
        ], dtype=np.float32)

    def _take_action(self, action):
        """Map the normalized 5-dim action to physical control commands."""
        pitch_val = np.clip(action[0], -1.0, 1.0)
        roll_val = np.clip(action[1], -1.0, 1.0)
        spoiler = action[2]
        throttle_val = np.clip((action[3] + 1.0) / 2.0, 0.0, 1.0)  # [-1,1] -> [0,1]
        flap = action[4]

        set_pitch(pitch_val)
        set_roll(roll_val)
        set_throttle(throttle_val)
        # Airbrake and flaps are toggles, triggered when the action exceeds 0.5.
        if spoiler > 0.5:
            airbrake_on_control()
        if flap > 0.5:
            flaps_control()

    def _calculate_reward(self, data, action):
        """
        Reward function: measures the agent's takeoff performance.

        Components: airspeed, vertical speed, attitude stability, heading
        hold, takeoff success, time penalty, control configuration, control
        smoothness, and dangerous-attitude penalty.

        Returns (total_reward, done) where done is True only on takeoff
        success; other terminations are handled by _is_done.
        """
        pitch = np.abs(data.get("pitch_deg", 0))
        bank = np.abs(data.get("bank_deg", 0))
        heading = data.get("heading_deg", 0)
        aoa = data.get("AoA", 0)
        speed = data.get("IAS", 0)
        vspeed = data.get("VV_0", 0)
        rpm_data = data.get("RPM", 0.0)
        rpm = rpm_data[0] if isinstance(rpm_data, list) else rpm_data
        flaps = data.get("Flaps", 0.0)
        # NOTE(review): axis labels conflict with _extract_state — verify.
        pitch_rate = np.abs(data.get("Omega_X", 0))
        roll_rate = np.abs(data.get("Omega_Y", 0))
        throttle = rpm / 100.0  # assumes RPM is reported as a percentage — TODO confirm
        gear_status = data.get("Gears", 1.0)
        alt = data.get("LLA_balt", self.initial_alt)

        # Heading deviation folded into [0, 180].
        yaw_error = np.abs(heading - self.initial_heading) % 360
        if yaw_error > 180:
            yaw_error = 360 - yaw_error

        # ========== Reward components ==========

        # 1. Airspeed reward (use the module constant instead of a magic 55).
        target_speed = MIN_TAKEOFF_SPEED
        reward_speed = min(speed / target_speed, 1.0)

        # 2. Vertical-speed reward: climb is rewarded while on the ground
        #    (gear down); sinking is penalized once airborne.
        reward_vspeed = 0.0
        if gear_status > 0.9:
            reward_vspeed = max(min(vspeed / 10.0, 1.0), 0.0)
        elif vspeed < -2:
            reward_vspeed = -min(np.abs(vspeed) / 10.0, 1.0)

        # 3. Attitude-stability reward.
        reward_stability = 0.0
        if bank < 5:  # bank is already an absolute value
            reward_stability += 0.6
        reward_stability += max(1.0 - pitch_rate / 2.0, 0.0)
        reward_stability += max(1.0 - roll_rate / 2.0, 0.0)
        reward_stability += max(1.0 - np.abs(aoa - 10) / 10.0, 0.0)
        # NOTE(review): visible maximum of the sum is 3.6, yet it is divided
        # by 4.6 — confirm whether this scaling is intentional.
        reward_stability = reward_stability / 4.6

        # 4. Heading-hold reward.
        reward_heading = max(1.0 - yaw_error / 30.0, 0.0)

        # 5. Takeoff-success reward (earlier success earns a larger bonus).
        reward_takeoff = 0.0
        done = False
        if gear_status < 0.1 and alt > self.initial_alt + 50:
            reward_takeoff = 1.0 + 1.0 / (self.step_count + 1)
            done = True

        # 6. Time penalty.
        reward_time_penalty = -0.002 * self.step_count

        # 7. Control-configuration reward (throttle up, flaps deployed).
        reward_controls = (throttle + (0.5 if flaps > 0 else 0)) / 2.0

        # 8. Control-smoothness penalty (discourage jerky inputs).
        reward_smooth = -0.05 * np.linalg.norm(np.array(action) - self.prev_action)

        # 9. Dangerous-attitude penalty.
        reward_danger = -0.05 * max(pitch - 20, 0) - 0.05 * max(bank - 20, 0)

        self.prev_action = np.array(action)

        # Component weights.
        w = {
            "speed": 1.5,
            "vspeed": 1.0,
            "stability": 2.5,
            "heading": 2.0,
            "takeoff": 3.0,
            "controls": 0.5
        }

        # Total reward: weighted combination plus penalty terms.
        total_reward = (
                w["speed"] * reward_speed +
                w["vspeed"] * reward_vspeed +
                w["stability"] * reward_stability +
                w["heading"] * reward_heading +
                w["takeoff"] * reward_takeoff +
                w["controls"] * reward_controls +
                reward_time_penalty +
                reward_smooth +
                reward_danger
        )

        return total_reward, done

    def _log_step(self, reward, data, done):
        """Append the step's reward/state row and trajectory point to the CSVs."""
        with open(CSV_LOG, "a", newline="") as f:
            writer = csv.writer(f)
            writer.writerow([
                self.episode, self.step_count, reward,
                data.get("pitch_deg", 0),
                data.get("bank_deg", 0),
                abs(data.get("heading_deg", 0) - self.initial_heading),
                data.get("RPM", [0.0])[0] if isinstance(data.get("RPM"), list) else data.get("RPM", 0.0),
                data.get("Flaps", 0),
                data.get("LLA_balt", 0),
                data.get("IAS", 0),
                done
            ])
        with open(TRAJ_LOG, "a", newline="") as f:
            writer = csv.writer(f)
            writer.writerow([
                data.get("LLA_long", 0),
                data.get("LLA_lat", 0),
                data.get("LLA_balt", 0)
            ])

    def render(self):
        """Plot and save the current episode's ground track (lon/lat)."""
        if not self.trajectory:
            logger.warning("暂无轨迹数据可视化")
            return

        lons, lats, alts = zip(*self.trajectory)
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.plot(lons, lats, label="Flight Path", linestyle="--")
        ax.set_xlabel("Longitude")
        ax.set_ylabel("Latitude")
        ax.set_title(f"Flight Trajectory (Episode {self.episode})")
        ax.legend()
        ax.grid(True)
        plt.savefig(TRAJ_PLOT)
        plt.close()
        logger.info(f"轨迹图已保存至 {TRAJ_PLOT}")

    def save_reward_plot(self):
        """Read this episode's rewards from the CSV log and save a trend plot."""
        try:
            rewards = []
            steps = []
            with open(CSV_LOG, "r") as f:
                reader = csv.DictReader(f)
                for row in reader:
                    if int(row["Episode"]) == self.episode:
                        rewards.append(float(row["Reward"]))
                        steps.append(int(row["Step"]))
            if not rewards:
                logger.warning("无奖励数据可绘制")
                return
            plt.figure(figsize=(10, 4))
            plt.plot(steps, rewards, label="Reward")
            plt.xlabel("Step")
            plt.ylabel("Reward")
            plt.title(f"Reward Trend (Episode {self.episode})")
            plt.grid(True)
            plt.legend()
            plt.savefig(f"results/reward_trend_episode_{self.episode}.png")
            plt.close()
            logger.info("已保存奖励曲线图")
        except Exception as e:
            logger.error(f"绘图失败: {e}")

    def _is_done(self, state, data):
        """Return True when the episode should terminate (crash, success, timeout)."""
        pitch = np.abs(state[1])
        bank = np.abs(state[2])
        alt = state[12]
        gear = state[5]

        if pitch > 45 or bank > 45:
            logger.warning("危险姿态，任务终止")
            return True
        if alt < 5:
            logger.warning("坠毁或贴地飞行")
            return True
        if gear < 0.1 and alt > self.initial_alt + 50:
            logger.info("成功起飞，任务完成")
            return True
        if self.step_count > 200:
            logger.info("超出最大步数")
            return True
        return False
