#include "CoreLogic.h"
#include "CacheManager.h"
#include "VisualizationExporter.h"
#include <algorithm>
#include <cmath>
#include <iostream>
#include <limits>
#include <map>
#include <optional>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

namespace CoreLogic {

    // A candidate eviction plan: the set of resident buffers to spill so that
    // a contiguous window starting at start_address becomes free, plus the
    // cost terms used to rank competing plans against each other.
    struct SpillPlan {
        std::vector<int> victim_ids;          // buf_ids to evict
        size_t cost_future_use = 0;           // soonest next-use step among victims (larger = safer to evict)
        double cost_volume = 0.0;             // total bytes across all victims
        double cost_victims = 0.0;            // number of victims (as double for cost normalization)
        long long start_address = 0;          // anchor of the window this plan would free
        // victim buf_id -> (schedule step of its next use, size in bytes)
        std::unordered_map<int, std::pair<size_t, long long>> victim_details;
    };

    // Forward declaration; defined at the bottom of this file.
    SpillPlan select_spill_plan(const Graph& graph, const std::unordered_map<int, std::tuple<int, long long, long long>>& active_buffers,
        CacheManager& manager, long long size_needed, int type_code, const std::vector<int>& schedule, size_t step,
        const Params& params);


    // Estimates the makespan (total cycles) of a finished solution.
    //
    // Pass 1 replays the schedule and adds serialization edges: whenever a
    // buffer is placed at an address previously occupied by a freed buffer,
    // the new allocation is made a successor of that free node, so the timing
    // pass cannot overlap the two lifetimes at the same address.
    // Pass 2 list-schedules each node at max(predecessor finish, pipe free)
    // and returns the latest end time.
    //
    // Note: the original implementation also built buf_id -> (alloc, free)
    // node maps and stored (offset, size, free_node) tuples, but only the
    // offset was ever read; that dead state has been removed.
    double calculateTotalTime(const Solution& solution, const Graph& graph, const CodeMap& code_map) {
        // Mutable copy: serialization edges are appended locally and must not
        // leak into the caller's graph.
        auto predecessors_list = graph.predecessors_list;

        std::unordered_map<int, long long> active_offsets;   // buf_id -> resident offset
        std::unordered_map<long long, int> address_history;  // offset -> node id that last freed it

        // ---- Pass 1: derive address-reuse dependencies ----
        for (int node_id : solution.final_schedule) {
            if (node_id < 0 || static_cast<size_t>(node_id) >= graph.nodes.size()) continue;
            const auto& node = graph.nodes[node_id];

            bool is_alloc = (node.op_code == code_map.ALLOC);
            bool is_spill_in = (node.op == "SPILL_IN");

            if (is_alloc || is_spill_in) {
                if (!node.buf_id.has_value()) continue;
                int buf_id = node.buf_id.value();

                // Resolve where this (re)allocation was placed: static ALLOCs
                // are keyed by buffer, SPILL_INs by the node that reloads.
                long long offset = -1;
                if (is_alloc) {
                    if (solution.memory_offsets.count(buf_id)) {
                        offset = solution.memory_offsets.at(buf_id);
                    }
                }
                else {
                    if (solution.spill_map.count(node_id)) {
                        offset = solution.spill_map.at(node_id);
                    }
                }

                if (offset != -1) {
                    // If this address was used before, the reuse must wait for
                    // the previous occupant's free node.
                    if (address_history.count(offset)) {
                        int last_free_node = address_history.at(offset);
                        if (static_cast<size_t>(node_id) < predecessors_list.size()) {
                            auto& preds = predecessors_list[node_id];
                            if (std::find(preds.begin(), preds.end(), last_free_node) == preds.end()) {
                                preds.push_back(last_free_node);
                            }
                        }
                    }
                    active_offsets[buf_id] = offset;
                }
            }
            else if (node.op_code == code_map.FREE || node.op == "SPILL_OUT") {
                if (!node.buf_id.has_value() || !active_offsets.count(node.buf_id.value())) continue;
                int buf_id = node.buf_id.value();

                // Remember who released this address for the next reuse.
                address_history[active_offsets.at(buf_id)] = node.id;
                active_offsets.erase(buf_id);
            }
        }

        // ---- Pass 2: list-scheduling simulation ----
        std::vector<double> node_end_times(graph.num_nodes + 1, 0.0);
        std::unordered_map<std::string, double> pipe_free_time;

        for (int node_id : solution.final_schedule) {
            // Guard both the node table and the end-time table: they can
            // differ in size if num_nodes is stale relative to nodes.size().
            if (node_id <= 0
                || static_cast<size_t>(node_id) >= graph.nodes.size()
                || static_cast<size_t>(node_id) >= node_end_times.size()) continue;
            const auto& node = graph.nodes[node_id];

            double predecessor_finish_time = 0.0;
            if (static_cast<size_t>(node_id) < predecessors_list.size()) {
                for (int pred_id : predecessors_list[node_id]) {
                    if (pred_id > 0 && static_cast<size_t>(pred_id) < node_end_times.size()) {
                        predecessor_finish_time = std::max(predecessor_finish_time, node_end_times[pred_id]);
                    }
                }
            }

            // Zero-cost nodes (ALLOC/FREE markers, free spills) finish when
            // their dependencies do and occupy no pipe time.
            if (!node.cycles.has_value() || node.cycles.value() == 0) {
                node_end_times[node_id] = predecessor_finish_time;
                continue;
            }

            // Each named pipe is a unit-capacity resource.
            double resource_available_time = 0.0;
            if (!node.pipe.empty() && pipe_free_time.count(node.pipe)) {
                resource_available_time = pipe_free_time[node.pipe];
            }

            double start_time = std::max(predecessor_finish_time, resource_available_time);
            double end_time = start_time + node.cycles.value();
            node_end_times[node_id] = end_time;
            if (!node.pipe.empty()) {
                pipe_free_time[node.pipe] = end_time;
            }
        }

        if (node_end_times.empty()) return 0.0;
        return *std::max_element(node_end_times.begin(), node_end_times.end());
    }


    // Replays `initial_schedule` against per-cache-type CacheManagers,
    // assigning offsets to ALLOC/SPILL_IN requests and releasing them on FREE.
    // When an allocation fails, a SpillPlan is selected, victim buffers are
    // evicted, and SPILL_OUT/SPILL_IN nodes are inserted into the schedule;
    // the failed request is then retried at the same step. Returns the final
    // solution together with the (possibly extended) graph.
    //
    // The returned graph gains one SPILL_OUT and one SPILL_IN node per victim;
    // graph.num_nodes is kept in sync with the resized node vectors.
    std::pair<Solution, Graph> performAllocation(
        const std::vector<int>& initial_schedule, const Graph& initial_graph, const CodeMap& code_map,
        const Caps& caps, const Params& params, VisualizationExporter* viz_exporter) {

        Solution solution;
        Graph graph = initial_graph;

        // Fresh ids for spill nodes appended to the graph (ids appear 1-based).
        int next_node_id = graph.num_nodes + 1;

        // One independent allocator per cache type, sized from Caps.
        std::unordered_map<int, CacheManager> cache_managers;
        cache_managers.emplace(code_map.L1, CacheManager(caps.l1));
        cache_managers.emplace(code_map.UB, CacheManager(caps.ub));
        cache_managers.emplace(code_map.L0A, CacheManager(caps.l0a));
        cache_managers.emplace(code_map.L0B, CacheManager(caps.l0b));
        cache_managers.emplace(code_map.L0C, CacheManager(caps.l0c));

        // buf_id -> (type_code, offset, size) for buffers currently resident.
        std::unordered_map<int, std::tuple<int, long long, long long>> active_buffers;
        // buf_id -> (type_code, size) for buffers evicted and awaiting SPILL_IN.
        std::unordered_map<int, std::pair<int, long long>> spilled_buffers;

        // Buffers produced by COPY_IN: their SPILL_OUT is modeled as free
        // (cycles = 0 below) -- presumably they can be re-fetched from the
        // original source instead of being written back; confirm with the
        // COPY_IN semantics if this changes.
        std::unordered_set<int> copy_in_bufs;
        for (const auto& node : graph.nodes) {
            if (node.op == "COPY_IN") {
                copy_in_bufs.insert(node.bufs.begin(), node.bufs.end());
            }
        }

        // buf_id -> step of its FREE in the *initial* schedule; passed to the
        // allocator so placement strategy can consider remaining lifetime.
        std::unordered_map<int, int> buf_id_to_free_step;
        for (size_t i = 0; i < initial_schedule.size(); ++i) {
            int node_id = initial_schedule[i];
            if (node_id < graph.nodes.size()) {
                const auto& node = graph.nodes[node_id];
                if (node.op_code == code_map.FREE && node.buf_id.has_value()) {
                    buf_id_to_free_step[node.buf_id.value()] = i;
                }
            }
        }

        std::vector<int> schedule = initial_schedule;
        // Safety budget: spill insertions grow the schedule, so cap total
        // steps at 1.5x the original length to break runaway spill loops.
        size_t max_steps = initial_schedule.size() * 1.5;
        size_t step = 0;

        std::cout << "        DEBUG: Starting allocation loop. Max steps: " << max_steps 
                  << ", Initial schedule size: " << initial_schedule.size() << std::endl;
        
        while (step < schedule.size()) {
            if (step > max_steps) {
                // Budget exceeded: record how far we got (counting only nodes
                // from the original schedule) and bail out as timed_out.
                std::cout << "        DEBUG: TIMEOUT! Step " << step << " > max_steps " << max_steps << std::endl;
                std::cout << "        DEBUG: Schedule grew to size: " << schedule.size() << std::endl;
                
                int processed_count = 0;
                std::unordered_set<int> initial_set(initial_schedule.begin(), initial_schedule.end());
                for (size_t i = 0; i < step; ++i) {
                    if (initial_set.count(schedule[i])) processed_count++;
                }
                solution.nodes_processed = static_cast<double>(processed_count);
                solution.timed_out = true;
                
                std::cout << "        DEBUG: Processed " << processed_count 
                          << " original nodes out of " << initial_schedule.size() << std::endl;
                break;
            }

            int node_id = schedule[step];
            // CAUTION: `node` references into graph.nodes, which is resized in
            // the spill branch below; it must not be used after that resize
            // within the same iteration (it currently is not).
            const auto& node = graph.nodes[node_id];

            bool is_alloc_request = (node.op_code == code_map.ALLOC);
            bool is_spill_in_request = (node.op == "SPILL_IN");

            if (is_alloc_request || is_spill_in_request) {
                if (!node.buf_id.has_value()) { step++; continue; }
                int buf_id = node.buf_id.value();
                long long size_needed;
                int type_code;

                if (is_alloc_request) {
                    // Skip degenerate ALLOCs (no size / zero size / no type).
                    if (!node.size.has_value() || node.size.value() <= 0 || node.type_code == 0) { step++; continue; }
                    size_needed = node.size.value();
                    type_code = node.type_code;
                }
                else {
                    // SPILL_IN for a buffer that was never spilled is a no-op.
                    if (!spilled_buffers.count(buf_id)) { step++; continue; }
                    auto spill_info = spilled_buffers.at(buf_id);
                    type_code = spill_info.first;
                    size_needed = spill_info.second;
                }

                auto& manager = cache_managers.at(type_code);
                int current_buf_free_step = buf_id_to_free_step.count(buf_id) ? buf_id_to_free_step.at(buf_id) : -1;

                // -1 signals allocation failure (no contiguous space).
                long long offset = manager.allocate(size_needed, buf_id, params.alloc_strategy, buf_id_to_free_step, current_buf_free_step);

                if (offset != -1) {
                    if (is_alloc_request) {
                        solution.memory_offsets[buf_id] = offset;
                    }
                    else {
                        // Spill-in placements are keyed by node, since a buffer
                        // can be reloaded more than once.
                        solution.spill_map[node_id] = offset;
                        spilled_buffers.erase(buf_id);
                    }
                    active_buffers[buf_id] = { type_code, offset, size_needed };
                    
                    // Record memory allocation
                    if (viz_exporter != nullptr) {
                        viz_exporter->logMemoryAllocation(step, type_code, buf_id, offset, size_needed, "alloc");
                        
                        // Record memory usage statistics
                        auto& manager = cache_managers.at(type_code);
                        long long total_allocated = 0;
                        for (const auto& block : manager.getAllocatedBlocks()) {
                            total_allocated += block.size;
                        }
                        double usage_percent = (double)total_allocated / manager.getCapacity() * 100.0;
                        int num_free_blocks = manager.getFreeBlocks().size();
                        int max_free_block_size = 0;
                        for (const auto& free_block : manager.getFreeBlocks()) {
                            max_free_block_size = std::max(max_free_block_size, (int)free_block.size);
                        }
                        viz_exporter->logMemoryStats(step, type_code, usage_percent, num_free_blocks, max_free_block_size);
                    }
                    
                    step++;
                    continue;
                }
                else {
                    // Allocation failed: choose victims to evict. An empty
                    // plan means no eviction can free enough space -> abort.
                    SpillPlan best_plan = select_spill_plan(graph, active_buffers, manager, size_needed, type_code, schedule, step, params);
                    if (best_plan.victim_ids.empty()) {
                        solution.spill_failed = true;
                        solution.nodes_processed = static_cast<double>(step);
                        solution.final_schedule = schedule;
                        return { solution, graph };
                    }

                    std::vector<std::pair<size_t, int>> insertions;
                    for (int victim_buf_id : best_plan.victim_ids) {
                        auto victim_info = active_buffers.at(victim_buf_id);
                        int victim_type_code = std::get<0>(victim_info);
                        long long victim_size = std::get<2>(victim_info);

                        // COPY_IN buffers only cost one transfer (the reload);
                        // others cost write-back + reload.
                        solution.total_spill_volume += (copy_in_bufs.count(victim_buf_id) ? victim_size : victim_size * 2);
                        spilled_buffers[victim_buf_id] = { victim_type_code, victim_size };
                        
                        // Record memory deallocation (before erasing from active_buffers)
                        if (viz_exporter != nullptr) {
                            int victim_offset = std::get<1>(victim_info);
                            viz_exporter->logMemoryAllocation(step, victim_type_code, victim_buf_id, victim_offset, victim_size, "free");
                        }
                        
                        active_buffers.erase(victim_buf_id);
                        cache_managers.at(victim_type_code).free(victim_buf_id);

                        // Synthesize the spill round-trip; cycle counts look
                        // like a bytes*2 + fixed-latency model -- confirm
                        // against the hardware cost model if tuned.
                        NodeData spill_out_node, spill_in_node;
                        spill_out_node.id = next_node_id++;
                        spill_out_node.op = "SPILL_OUT";
                        spill_out_node.buf_id = victim_buf_id;
                        spill_out_node.pipe = "MTE3";
                        spill_out_node.cycles = copy_in_bufs.count(victim_buf_id) ? 0 : victim_size * 2 + 150;

                        spill_in_node.id = next_node_id++;
                        spill_in_node.op = "SPILL_IN";
                        spill_in_node.buf_id = victim_buf_id;
                        spill_in_node.pipe = "MTE2";
                        spill_in_node.cycles = victim_size * 2 + 150;

                        // *** MODIFIED: Resize all graph vectors to ensure consistency ***
                        // (This resize invalidates `node` above.)
                        graph.nodes.resize(next_node_id);
                        graph.adj_list.resize(next_node_id);
                        graph.predecessors_list.resize(next_node_id);

                        graph.nodes[spill_out_node.id] = spill_out_node;
                        graph.nodes[spill_in_node.id] = spill_in_node;

                        // Reload just before the victim's next use (capped to
                        // the current schedule length).
                        size_t insertion_point = best_plan.victim_details.at(victim_buf_id).first;

                        size_t capped_insertion_point = std::min(insertion_point, schedule.size());

                        insertions.push_back({ step, spill_out_node.id });
                        insertions.push_back({ capped_insertion_point, spill_in_node.id });
                    }
                    graph.num_nodes = next_node_id - 1;

                    // Insert back-to-front so earlier indices stay valid.
                    std::sort(insertions.rbegin(), insertions.rend());
                    for (const auto& ins : insertions) {
                        schedule.insert(schedule.begin() + ins.first, ins.second);
                    }
                    // NOTE: step is deliberately NOT advanced -- the failed
                    // request is retried now that space has been freed.
                }

            }
            else if (node.op_code == code_map.FREE) {
                if (node.buf_id.has_value() && active_buffers.count(node.buf_id.value())) {
                    int buf_id = node.buf_id.value();
                    auto buffer_info = active_buffers.at(buf_id);
                    int type_code = std::get<0>(buffer_info);
                    long long buffer_size = std::get<2>(buffer_info);

                    cache_managers.at(type_code).free(buf_id);
                    
                    // Record buffer lifetime end (we need to track start step separately)
                    if (viz_exporter != nullptr) {
                        // Note: We'll need to add buffer start tracking for complete lifetime data
                        // For now, we can add a placeholder or track allocation steps
                        BufferLifetime lifetime;
                        lifetime.buffer_id = buf_id;
                        lifetime.start_step = 0;  // TODO: Track actual allocation step
                        lifetime.end_step = step;
                        lifetime.cache_type = type_code;
                        lifetime.size = buffer_size;
                        viz_exporter->addBufferLifetime(lifetime);
                    }

                    active_buffers.erase(buf_id);
                }
                step++;
            }
            else {
                // Compute / control nodes need no memory bookkeeping here.
                step++;
            }
        }

        solution.final_schedule = schedule;
        return { solution, graph };
    }


    // Checks that a completed solution is self-consistent:
    //   1. the final schedule respects every graph edge (topological order),
    //   2. replaying all placements via CacheManager::allocateAt never
    //      produces overlapping live buffers or exceeds a cache's capacity,
    //   3. each L0 cache (L0A/L0B/L0C) holds at most one live buffer.
    // Returns true when all checks pass; the first violation is logged to
    // stderr and false is returned.
    bool validateSolution(const Solution& solution, const Graph& graph, const CodeMap& code_map, const Caps& caps) {
        // Size/type of every statically allocated buffer, needed to replay
        // SPILL_IN nodes (which carry no size of their own).
        std::unordered_map<int, std::pair<long long, int>> buf_info_map;
        for (const auto& node : graph.nodes) {
            if (node.id != -1 && node.op_code == code_map.ALLOC && node.buf_id.has_value() && node.size.has_value()) {
                buf_info_map[node.buf_id.value()] = { node.size.value(), node.type_code };
            }
        }

        std::unordered_map<int, size_t> node_positions;
        for (size_t i = 0; i < solution.final_schedule.size(); ++i) {
            node_positions[solution.final_schedule[i]] = i;
        }

        // ---- Check 1: topological order ----
        for (int u = 1; u <= graph.num_nodes; ++u) {
            if (!node_positions.count(u) || static_cast<size_t>(u) >= graph.adj_list.size()) continue;
            for (int v : graph.adj_list[u]) {
                if (node_positions.count(v) && node_positions.at(u) > node_positions.at(v)) {
                    std::cerr << "Validation failed: Topological order violated for " << u << " and " << v << std::endl;
                    return false;
                }
            }
        }

        // ---- Check 2 + 3: replay placements on fresh allocators ----
        std::unordered_map<int, CacheManager> cache_managers;
        cache_managers.emplace(code_map.L1, CacheManager(caps.l1));
        cache_managers.emplace(code_map.UB, CacheManager(caps.ub));
        cache_managers.emplace(code_map.L0A, CacheManager(caps.l0a));
        cache_managers.emplace(code_map.L0B, CacheManager(caps.l0b));
        cache_managers.emplace(code_map.L0C, CacheManager(caps.l0c));
        std::unordered_map<int, int> l0_active = { {code_map.L0A, 0}, {code_map.L0B, 0}, {code_map.L0C, 0} };
        std::unordered_map<int, int> active_buffers;  // buf_id -> type_code while live

        for (size_t step_idx = 0; step_idx < solution.final_schedule.size(); ++step_idx) {
            int node_id = solution.final_schedule[step_idx];
            if (node_id < 0 || static_cast<size_t>(node_id) >= graph.nodes.size()) continue;
            const auto& node = graph.nodes[node_id];

            if (node.op == "SPILL_IN" || node.op_code == code_map.ALLOC) {
                if (!node.buf_id.has_value()) continue;
                int buf_id = node.buf_id.value();
                long long offset, size;
                int type_code;

                if (node.op_code == code_map.ALLOC) {
                    // A scheduled ALLOC with no recorded offset or no size is
                    // itself a validation failure, not an exception.
                    if (!solution.memory_offsets.count(buf_id)) return false;
                    if (!node.size.has_value()) return false;
                    offset = solution.memory_offsets.at(buf_id);
                    size = node.size.value();
                    type_code = node.type_code;
                }
                else {
                    if (!solution.spill_map.count(node_id) || !buf_info_map.count(buf_id)) return false;
                    offset = solution.spill_map.at(node_id);
                    auto info = buf_info_map.at(buf_id);
                    size = info.first;
                    type_code = info.second;
                }

                // allocateAt fails on overlap with a live buffer or on
                // exceeding capacity.
                auto& manager = cache_managers.at(type_code);
                bool success = manager.allocateAt(size, buf_id, offset);

                if (!success) {
                    std::cerr << "Validation failed: Memory allocation conflict for buf_id " << buf_id << std::endl;
                    return false;
                }
                active_buffers[buf_id] = type_code;
                if (l0_active.count(type_code)) {
                    l0_active[type_code]++;
                    if (l0_active[type_code] > 1) {
                        std::cerr << "Validation failed: L0 constraint violated for type_code " << type_code << std::endl;
                        return false;
                    }
                }

            }
            else if (node.op_code == code_map.FREE || node.op == "SPILL_OUT") {
                // Frees of non-live buffers are tolerated (mirrors the
                // allocator's own behavior in performAllocation).
                if (!node.buf_id.has_value() || !active_buffers.count(node.buf_id.value())) continue;
                int buf_id = node.buf_id.value();
                int type_code = active_buffers.at(buf_id);

                auto& manager = cache_managers.at(type_code);
                manager.free(buf_id);

                active_buffers.erase(buf_id);
                if (l0_active.count(type_code)) {
                    l0_active[type_code]--;
                }
            }
        }

        return true;
    }


    // Scores a schedule as (total_time, total_spill), lower is better.
    // Runs performAllocation, validates the result, and either:
    //   - on success: returns the simulated makespan and spill volume, or
    //   - on failure (timeout / spill failure / invalid): returns a penalty
    //     score derived from `initial_scores` weighted by how far the
    //     allocation progressed, or (+inf, +inf) when no baseline exists.
    std::pair<double, double> evaluateSchedule(
        const std::vector<int>& schedule, const Graph& graph, const CodeMap& code_map,
        const Caps& caps, const Params& params,
        const std::optional<std::pair<double, double>>& initial_scores, VisualizationExporter* viz_exporter) {

        auto [solution, final_graph] = performAllocation(schedule, graph, code_map, caps, params, viz_exporter);

        // Only validate runs that actually completed.
        bool is_valid = true;
        if (!solution.timed_out && !solution.spill_failed) {
            is_valid = validateSolution(solution, final_graph, code_map, caps);
        }

        double total_time = std::numeric_limits<double>::infinity();
        double total_spill = std::numeric_limits<double>::infinity();

        if (solution.timed_out || solution.spill_failed || !is_valid) {
            std::cout << "      DEBUG: Solution failed - timed_out: " << solution.timed_out 
                      << ", spill_failed: " << solution.spill_failed 
                      << ", is_valid: " << is_valid << std::endl;
            std::cout << "      DEBUG: Nodes processed: " << solution.nodes_processed 
                      << "/" << schedule.size() << std::endl;
            if (!initial_scores.has_value()) {
                std::cout << "      DEBUG: No initial scores, returning inf" << std::endl;
                // Fall through: both scores remain +inf.
            }
            else {
                // Guard against an empty schedule: 0.0 / 0 would yield NaN
                // and poison every downstream comparison.
                double progress_ratio = schedule.empty()
                    ? 0.0
                    : solution.nodes_processed / static_cast<double>(schedule.size());
                // Invalid-but-complete solutions count as full progress so
                // the penalty reflects only the 2x base factor.
                if (!is_valid) progress_ratio = 1.0;

                double penalty_factor = 10.0;
                total_time = initial_scores.value().first * (2.0 + penalty_factor * (1.0 - progress_ratio));
                total_spill = static_cast<double>(solution.total_spill_volume) + initial_scores.value().second * (2.0 - progress_ratio);
            }
        }
        else {
            total_spill = static_cast<double>(solution.total_spill_volume);
            total_time = calculateTotalTime(solution, final_graph, code_map);
        }

        return { total_time, total_spill };
    }


    // Chooses which resident buffers ("victims") to evict so that a
    // contiguous window of `size_needed` bytes becomes free in the cache of
    // `type_code`.
    //
    // Candidate windows are anchored at existing free/allocated block
    // boundaries (plus candidate-specific anchors); every window that stays
    // inside the cache and whose overlapping blocks are all evictable yields
    // one SpillPlan. Plans are then ranked either lexicographically
    // (fallback mode, when few candidates survive the farthest-use filter)
    // or by a normalized weighted cost using params.w_use / w_vol / w_vic.
    // Returns a default-constructed (victimless) plan when nothing works.
    SpillPlan select_spill_plan(const Graph& graph, const std::unordered_map<int, std::tuple<int, long long, long long>>& active_buffers,
        CacheManager& manager, long long size_needed, int type_code, const std::vector<int>& schedule, size_t step,
        const Params& params) {

        std::cout << "          DEBUG: select_spill_plan called - need " << size_needed 
                  << " bytes, type " << type_code << ", step " << step << std::endl;
        std::cout << "          DEBUG: Active buffers: " << active_buffers.size() << std::endl;

        // For every same-type resident buffer, find the step of its next use
        // after `step` (scanning both node.bufs and node.buf_id).
        std::unordered_map<int, std::pair<size_t, long long>> all_candidate_info;
        for (const auto& pair : active_buffers) {
            int candidate_buf_id = pair.first;
            const auto& buffer_info = pair.second;
            if (std::get<0>(buffer_info) != type_code) continue;

            size_t farthest_use = 0;
            for (size_t future_step = step + 1; future_step < schedule.size(); ++future_step) {
                int future_node_id = schedule[future_step];
                const auto& future_node = graph.nodes[future_node_id];
                bool found = false;
                if (std::find(future_node.bufs.begin(), future_node.bufs.end(), candidate_buf_id) != future_node.bufs.end()) {
                    found = true;
                }
                if (!found && future_node.buf_id.has_value() && future_node.buf_id.value() == candidate_buf_id) {
                    found = true;
                }
                if (found) {
                    farthest_use = future_step;
                    break;
                }
            }
            // 0 here means "never used again" (the scan starts at step+1, so
            // 0 is never a legitimate hit); treat as beyond the schedule end.
            if (farthest_use == 0) farthest_use = schedule.size() + 2;
            all_candidate_info[candidate_buf_id] = { farthest_use, std::get<2>(buffer_info) };
        }

        // Prefer victims whose next use is comfortably far away.
        std::unordered_map<int, std::pair<size_t, long long>> filtered_candidates;
        for (const auto& pair : all_candidate_info) {
            if (pair.second.first > params.min_farthest_use_threshold) {
                filtered_candidates.insert(pair);
            }
        }

        std::cout << "          DEBUG: All candidates: " << all_candidate_info.size() 
                  << ", Filtered candidates: " << filtered_candidates.size() << std::endl;
        std::cout << "          DEBUG: min_farthest_use_threshold: " << params.min_farthest_use_threshold << std::endl;
        
        // Enhanced fallback strategy: trigger earlier when candidates are insufficient
        bool use_fallback = filtered_candidates.empty() || filtered_candidates.size() < 3;
        const auto& candidates_to_use = use_fallback ? all_candidate_info : filtered_candidates;
        std::cout << "          DEBUG: Using " << (use_fallback ? "fallback" : "filtered") 
                  << " candidates: " << candidates_to_use.size() 
                  << " (fallback triggered: " << use_fallback << ")" << std::endl;

        std::vector<long long> anchor_points;
        // Original anchor points from free and allocated blocks
        for (const auto& fb : manager.getFreeBlocks()) anchor_points.push_back(fb.start);
        for (const auto& ab : manager.getAllocatedBlocks()) anchor_points.push_back(ab.start);
        
        // Enhanced: Add candidate-specific anchor points to improve overlap chances
        for (const auto& pair : candidates_to_use) {
            int buf_id = pair.first;
            // Find the allocated block for this candidate buffer
            for (const auto& ab : manager.getAllocatedBlocks()) {
                if (ab.buf_id == buf_id) {
                    // Add anchor points that would create windows overlapping this buffer
                    anchor_points.push_back(ab.start);  // Start of buffer
                    if (ab.start >= size_needed) {
                        anchor_points.push_back(ab.start - size_needed + 1);  // Window ending at buffer start
                    }
                    // May be negative when the block is smaller than
                    // size_needed; filtered out in the window loop below.
                    anchor_points.push_back(ab.start + ab.size - size_needed);  // Window starting at buffer end
                    break;
                }
            }
        }
        
        std::sort(anchor_points.begin(), anchor_points.end());
        anchor_points.erase(std::unique(anchor_points.begin(), anchor_points.end()), anchor_points.end());

        std::cout << "          DEBUG: Anchor points: " << anchor_points.size() 
                  << ", Manager capacity: " << manager.getCapacity() << std::endl;

        // Build one plan per feasible window: every allocated block that
        // overlaps the window must itself be an eviction candidate.
        std::vector<SpillPlan> all_plans;
        int plan_attempt = 0;
        for (long long anchor : anchor_points) {
            long long window_start = anchor;
            long long window_end = window_start + size_needed;
            // FIX: also reject negative windows (possible via the
            // candidate-specific anchors above), not just over-capacity ones.
            if (window_start < 0 || window_end > manager.getCapacity()) {
                continue;  // Skip plans that exceed capacity
            }

            SpillPlan current_plan;
            current_plan.start_address = anchor;
            for (const auto& ab : manager.getAllocatedBlocks()) {
                if (ab.start < window_end && (ab.start + ab.size) > window_start) {
                    if (candidates_to_use.count(ab.buf_id)) {
                        current_plan.victim_ids.push_back(ab.buf_id);
                    }
                }
            }

            // Only show plans with victims or first few failed attempts
            if (!current_plan.victim_ids.empty() || plan_attempt < 3) {
                std::cout << "          DEBUG: Plan " << plan_attempt 
                          << " window [" << window_start << ", " << window_end << ") -> " 
                          << current_plan.victim_ids.size() << " victims" << std::endl;
            }
            plan_attempt++;

            if (!current_plan.victim_ids.empty()) {
                size_t min_future_use = std::numeric_limits<size_t>::max();
                double total_volume = 0;
                for (int vid : current_plan.victim_ids) {
                    const auto& info = candidates_to_use.at(vid);
                    min_future_use = std::min(min_future_use, info.first);
                    total_volume += info.second;
                    current_plan.victim_details[vid] = info;
                }
                current_plan.cost_future_use = min_future_use;
                current_plan.cost_volume = total_volume;
                current_plan.cost_victims = static_cast<double>(current_plan.victim_ids.size());
                all_plans.push_back(current_plan);
            }
        }

        std::cout << "          DEBUG: Generated " << all_plans.size() << " spill plans" << std::endl;
        if (all_plans.empty()) {
            std::cout << "          DEBUG: No valid spill plans found!" << std::endl;
            return {};
        }

        SpillPlan best_plan;
        if (use_fallback) {
            // Lexicographic: latest next use, then least volume, then fewest victims.
            std::sort(all_plans.begin(), all_plans.end(), [](const SpillPlan& a, const SpillPlan& b) {
                if (a.cost_future_use != b.cost_future_use) return a.cost_future_use > b.cost_future_use;
                if (a.cost_volume != b.cost_volume) return a.cost_volume < b.cost_volume;
                return a.cost_victims < b.cost_victims;
                });
            best_plan = all_plans[0];
        }
        else {
            if (all_plans.size() == 1) {
                best_plan = all_plans[0];
            }
            else {
                // Min-max normalize each cost term over all plans, then pick
                // the plan with the lowest weighted sum.
                size_t min_c = std::numeric_limits<size_t>::max(), max_c = 0;
                // FIX: lowest(), not min() -- numeric_limits<double>::min() is
                // the smallest POSITIVE double and is a broken initial "max".
                double min_v = std::numeric_limits<double>::max(), max_v = std::numeric_limits<double>::lowest();
                double min_vic = std::numeric_limits<double>::max(), max_vic = std::numeric_limits<double>::lowest();

                for (const auto& p : all_plans) {
                    min_c = std::min(min_c, p.cost_future_use); max_c = std::max(max_c, p.cost_future_use);
                    min_v = std::min(min_v, p.cost_volume);   max_v = std::max(max_v, p.cost_volume);
                    min_vic = std::min(min_vic, p.cost_victims); max_vic = std::max(max_vic, p.cost_victims);
                }

                double range_c = static_cast<double>(std::max((size_t)1, max_c - min_c));
                double range_v = std::max(1.0, max_v - min_v);
                double range_vic = std::max(1.0, max_vic - min_vic);

                double min_total_cost = std::numeric_limits<double>::infinity();
                int best_plan_idx = -1;
                for (size_t i = 0; i < all_plans.size(); ++i) {
                    double norm_c = static_cast<double>(all_plans[i].cost_future_use - min_c) / range_c;
                    double norm_v = (all_plans[i].cost_volume - min_v) / range_v;
                    double norm_vic = (all_plans[i].cost_victims - min_vic) / range_vic;

                    // Far-future use is GOOD, so it enters as (1 - norm_c).
                    double total_cost = params.w_use * (1.0 - norm_c) + params.w_vol * norm_v + params.w_vic * norm_vic;
                    if (total_cost < min_total_cost) {
                        min_total_cost = total_cost;
                        best_plan_idx = static_cast<int>(i);
                    }
                }
                best_plan = all_plans[best_plan_idx];
            }
        }
        return best_plan;
    }

} // namespace CoreLogic