/* SPDX-FileCopyrightText: 2025 LichtFeld Studio Authors
 *
 * SPDX-License-Identifier: GPL-3.0-or-later */

#include "strategy_utils.hpp"
#include "optimizers/fused_adam.hpp"

namespace gs::training {
    // Move every learnable gaussian attribute to the GPU and turn on autograd,
    // then allocate a fresh (non-trainable) densification-info buffer sized to
    // the current number of gaussians.
    void initialize_gaussians(gs::SplatData& splat_data) {
        const auto device = torch::kCUDA;
        const auto make_trainable = [device](torch::Tensor& t) {
            t = t.to(device).set_requires_grad(true);
        };

        make_trainable(splat_data.means());
        make_trainable(splat_data.scaling_raw());
        make_trainable(splat_data.rotation_raw());
        make_trainable(splat_data.opacity_raw());
        make_trainable(splat_data.sh0());
        make_trainable(splat_data.shN());

        // 2 rows of per-gaussian bookkeeping; gradients are never needed here.
        const auto n = splat_data.means().size(0);
        splat_data._densification_info =
            torch::zeros({2, n}, splat_data.means().options()).set_requires_grad(false);
    }

    // Build a FusedAdam optimizer with one param group per gaussian attribute,
    // each carrying its own learning rate. The means' lr is scaled by the scene
    // scale; the higher-order SH coefficients learn 20x slower than sh0.
    std::unique_ptr<torch::optim::Optimizer> create_optimizer(
        gs::SplatData& splat_data,
        const gs::param::OptimizationParameters& params) {
        using Options = FusedAdam::Options;

        // Factory: one single-tensor group with the given lr and shared eps/betas.
        const auto make_group = [](const torch::Tensor& tensor, double lr) {
            auto opts = std::make_unique<Options>(lr);
            opts->eps(1e-15).betas(std::make_tuple(0.9, 0.999));
            return torch::optim::OptimizerParamGroup(
                std::vector<torch::Tensor>{tensor},
                std::unique_ptr<torch::optim::OptimizerOptions>(std::move(opts)));
        };

        std::vector<torch::optim::OptimizerParamGroup> groups;
        groups.reserve(6);
        groups.push_back(make_group(splat_data.means(), params.means_lr * splat_data.get_scene_scale()));
        groups.push_back(make_group(splat_data.sh0(), params.shs_lr));
        groups.push_back(make_group(splat_data.shN(), params.shs_lr / 20.f));
        groups.push_back(make_group(splat_data.scaling_raw(), params.scaling_lr));
        groups.push_back(make_group(splat_data.rotation_raw(), params.rotation_lr));
        groups.push_back(make_group(splat_data.opacity_raw(), params.opacity_lr));

        // Optimizer-level defaults; each group above overrides the lr anyway.
        auto defaults = std::make_unique<Options>(0.f);
        defaults->eps(1e-15);
        return std::make_unique<FusedAdam>(std::move(groups), std::move(defaults));
    }

    // Create an exponential lr decay schedule for a single param group.
    // gamma = final_lr_fraction^(1/iterations), so after `iterations` steps
    // the lr has decayed to final_lr_fraction * initial_lr.
    std::unique_ptr<ExponentialLR> create_scheduler(
        const gs::param::OptimizationParameters& params,
        torch::optim::Optimizer* optimizer,
        int param_group_index) {
        return std::make_unique<ExponentialLR>(
            *optimizer,
            compute_lr_decay_gamma(params.final_lr_fraction, params.iterations),
            param_group_index);
    }

    // Like create_scheduler, but the lr first ramps from
    // warmup_start_factor * initial_lr up to initial_lr over warmup_steps,
    // then follows the same exponential decay.
    std::unique_ptr<WarmupExponentialLR> create_warmup_scheduler(
        const gs::param::OptimizationParameters& params,
        torch::optim::Optimizer* optimizer,
        int param_group_index,
        int warmup_steps,
        float warmup_start_factor) {
        const double decay_gamma =
            compute_lr_decay_gamma(params.final_lr_fraction, params.iterations);
        return std::make_unique<WarmupExponentialLR>(
            *optimizer, decay_gamma, warmup_steps, warmup_start_factor, param_group_index);
    }

    // Replace selected gaussian parameter tensors (e.g. after densification or
    // pruning) while keeping the optimizer's per-parameter Adam state in sync.
    //
    // param_fn     : maps (param index, old tensor) -> new tensor.
    // optimizer_fn : maps (old Adam state, new tensor) -> new Adam state
    //                (or nullptr to drop the state).
    // optimizer    : the FusedAdam instance whose groups mirror `params` below.
    // param_idxs   : indices (0..5) of the parameters to update; values must be
    //                valid indices into the 6-entry tables below.
    //
    // libtorch keys optimizer state by the parameter's TensorImpl pointer, so
    // swapping a tensor requires re-keying its state: read the old state, erase
    // the old key, insert under the new tensor's key.
    void update_param_with_optimizer(
        const ParamUpdateFn& param_fn,
        const OptimizerUpdateFn& optimizer_fn,
        std::unique_ptr<torch::optim::Optimizer>& optimizer,
        gs::SplatData& splat_data,
        std::vector<size_t> param_idxs) {
        // Pointer table into splat_data: index i here matches optimizer group i.
        // Also used to write the new tensors back in place at the end, so the
        // parameter ordering is defined in exactly one spot.
        std::array<torch::Tensor*, 6> params = {
            &splat_data.means(),
            &splat_data.sh0(),
            &splat_data.shN(),
            &splat_data.scaling_raw(),
            &splat_data.rotation_raw(),
            &splat_data.opacity_raw()};

        std::array<torch::Tensor, 6> new_params;

        // Phase 1: compute new tensors and transformed optimizer states, keyed
        // by the *old* TensorImpl pointers.
        std::vector<void*> old_param_keys;
        old_param_keys.reserve(param_idxs.size());
        std::array<std::unique_ptr<torch::optim::OptimizerParamState>, 6> saved_states;

        for (auto i : param_idxs) {
            new_params[i] = param_fn(i, *params[i]);

            auto& old_param = optimizer->param_groups()[i].params()[0];
            void* old_param_key = old_param.unsafeGetTensorImpl();
            old_param_keys.push_back(old_param_key);

            // A parameter that has never been stepped has no state yet.
            auto state_it = optimizer->state().find(old_param_key);
            if (state_it != optimizer->state().end()) {
                // NOTE(review): assumes every state in this optimizer really is
                // a FusedAdam::AdamParamState — holds as long as the optimizer
                // came from create_optimizer above.
                auto* fused_adam_state = static_cast<FusedAdam::AdamParamState*>(state_it->second.get());
                saved_states[i] = optimizer_fn(*fused_adam_state, new_params[i]);
            } else {
                saved_states[i] = nullptr;
            }
        }

        // Phase 2: erase stale states only after every one has been read above,
        // so a (theoretical) duplicated TensorImpl key cannot be lost mid-loop.
        for (auto key : old_param_keys) {
            optimizer->state().erase(key);
        }

        // Phase 3: install the new tensors in the optimizer, re-key the saved
        // states, and write the new tensors back into splat_data through the
        // pointer table (replacing the need for a per-index switch).
        for (auto i : param_idxs) {
            optimizer->param_groups()[i].params()[0] = new_params[i];

            if (saved_states[i]) {
                optimizer->state()[new_params[i].unsafeGetTensorImpl()] = std::move(saved_states[i]);
            }

            *params[i] = new_params[i];
        }
    }
} // namespace gs::training
