#include "RLRewardEngine.h"
#include "RLPlanningEngine.h" // Include the full definition here
#include <cmath>
#include <numeric>

namespace algo
{
    RLRewardEngine::RLRewardEngine(const RLPlatformData& platformData, const algobase::RLConfig& config, const RLPlanningEngine& planningEngine)
        : mPlatformData(platformData), mConfig(config), mPlanningEngine(planningEngine)
    {
    }

    /// Destructor: the engine owns no resources (it only holds references /
    /// copies of externally managed data), so the compiler-generated
    /// destructor is correct. Defaulting it out-of-line keeps the header
    /// declaration valid while expressing that intent explicitly.
    RLRewardEngine::~RLRewardEngine() = default;

    /// Builds the full reward vector for taking `currentAction` into grid
    /// cell (ix, iy, iz) while heading for `end_grid`.
    ///
    /// Hard-constraint violations dominate: if one fires, only the safety
    /// slot is filled (with the penalty) and every other term stays zero.
    /// Otherwise the vector carries soft safety, heuristic alignment,
    /// goal-approach shaping (inverse distance) and a constant step cost.
    /// `lastAction` is reserved for the turn/pitch penalties, which are
    /// not wired in yet.
    std::vector<double> RLRewardEngine::calculateReward(int ix, int iy, int iz, int lastAction, int currentAction, const std::tuple<int, int, int>& end_grid)
    {
        std::vector<double> reward(RLPlanningEngine::REWARD_VECTOR_SIZE, 0.0);

        // Hard constraints short-circuit everything else.
        const double hardPenalty = checkHardConstraints(ix, iy, iz);
        if (hardPenalty < 0.0)
        {
            reward[RLPlanningEngine::REWARD_SAFETY] = hardPenalty;
            return reward;
        }

        // Soft shaping terms.
        reward[RLPlanningEngine::REWARD_SAFETY] = calculateSafetyReward(ix, iy, iz);
        reward[RLPlanningEngine::REWARD_HEURISTIC] = calculateHeuristicReward(ix, iy, iz, currentAction, end_grid);

        // Platform-constraint penalties are placeholders and not active yet:
        // reward[RLPlanningEngine::REWARD_TURN_PENALTY] = calculateTurnPenalty(lastAction, currentAction);
        // reward[RLPlanningEngine::REWARD_PITCH_PENALTY] = calculatePitchPenalty(lastAction, currentAction);

        // Goal-approach shaping: reciprocal of the Euclidean distance (in
        // grid units) to the goal cell; left at zero when already on it.
        const double dx = ix - std::get<0>(end_grid);
        const double dy = iy - std::get<1>(end_grid);
        const double dz = iz - std::get<2>(end_grid);
        const double goalDistance = std::sqrt(dx * dx + dy * dy + dz * dz);
        if (goalDistance > 0)
        {
            reward[RLPlanningEngine::REWARD_GOAL_APPROACH] = 1.0 / goalDistance;
        }

        // Constant per-step cost discourages wandering.
        reward[RLPlanningEngine::REWARD_STEP_COST] = -0.01;

        return reward;
    }

    double RLRewardEngine::checkHardConstraints(int ix, int iy, int iz)
    {
        algobase::CoordZ currentPos = mPlanningEngine.gridToWorld(ix, iy, iz);
        double depth = currentPos[2];
        double seabedDepth = mPlanningEngine.getDepth(currentPos[0], currentPos[1]);

        if (depth < seabedDepth) {
            return -100.0; // Collision with seabed
        }
        return 0.0; // No violation
    }

    double RLRewardEngine::calculateSafetyReward(int ix, int iy, int iz)
    {
        algobase::CoordZ currentPos = mPlanningEngine.gridToWorld(ix, iy, iz);
        double depth = currentPos[2];

        // Soft penalty for being near the surface
        if (depth > -10.0) { // Within 10 meters of the surface
            return -10.0;
        }

        return 0.1; // Base safety reward
    }

    double RLRewardEngine::calculateHeuristicReward(int ix, int iy, int iz, int action, const std::tuple<int, int, int>& end_grid)
    {
        double ideal_dx = std::get<0>(end_grid) - ix;
        double ideal_dy = std::get<1>(end_grid) - iy;
        double ideal_dz = std::get<2>(end_grid) - iz;
        double norm = std::sqrt(ideal_dx * ideal_dx + ideal_dy * ideal_dy + ideal_dz * ideal_dz);
        if (norm > 0) {
            ideal_dx /= norm;
            ideal_dy /= norm;
            ideal_dz /= norm;
        }

        double actual_dx = 0, actual_dy = 0, actual_dz = 0;
        switch (static_cast<RLPlanningEngine::Action>(action))
        {
            case RLPlanningEngine::North: actual_dy = 1; break;
            case RLPlanningEngine::South: actual_dy = -1; break;
            case RLPlanningEngine::East:  actual_dx = 1; break;
            case RLPlanningEngine::West:  actual_dx = -1; break;
            case RLPlanningEngine::Up:    actual_dz = -1; break;
            case RLPlanningEngine::Down:  actual_dz = 1; break;
            case RLPlanningEngine::Invalid: break;
        }
        
        double heuristic_reward = ideal_dx * actual_dx + ideal_dy * actual_dy + ideal_dz * actual_dz;
        return heuristic_reward * 0.5;
    }

    /// Placeholder: will penalize heading changes that exceed what
    /// mPlatformData.max_turn_radius permits. Currently contributes
    /// nothing to the reward vector (and is not called — see
    /// calculateReward). Parameter names elided until implemented.
    double RLRewardEngine::calculateTurnPenalty(int /*lastAction*/, int /*currentAction*/)
    {
        return 0.0; // TODO: implement against mPlatformData.max_turn_radius
    }

    /// Placeholder: will penalize vertical transitions steeper than
    /// mPlatformData.max_pitch_angle allows. Currently contributes
    /// nothing to the reward vector (and is not called — see
    /// calculateReward). Parameter names elided until implemented.
    double RLRewardEngine::calculatePitchPenalty(int /*lastAction*/, int /*currentAction*/)
    {
        return 0.0; // TODO: implement against mPlatformData.max_pitch_angle
    }
}