#pragma once
#include <iostream>
#include <torch/torch.h>

// Schedules the learning rate of a torch::optim::Adam optimizer, moving it
// from the lr the optimizer was constructed with toward vLrFinal over
// vMaxSteps steps. (Exact decay curve lives in the .cpp implementation of
// step()/getLearningRate() — presumably an interpolation between m_LrInit
// and m_LrFinal; confirm there.)
//
// NOTE(review): the scheduler does NOT own the optimizer — the caller must
// keep *vOpt alive for the lifetime of this object.
class COptimScheduler {
public:
    // vOpt      : non-owning pointer to the optimizer whose lr is adjusted;
    //             the initial lr is read from its first param group.
    // vLrFinal  : learning rate targeted at/after vMaxSteps.
    // vMaxSteps : number of steps over which the lr is adjusted.
    COptimScheduler(torch::optim::Adam* vOpt, float vLrFinal, int vMaxSteps) :
        m_pOpt(vOpt),
        // AdamOptions::get_lr() returns double in libtorch; narrow to float
        // explicitly rather than via a silent implicit conversion.
        m_LrInit(static_cast<float>(
            static_cast<torch::optim::AdamOptions&>(vOpt->param_groups()[0].options()).get_lr()
        )),
        m_LrFinal(vLrFinal),
        m_MaxSteps(vMaxSteps) {}

    // Applies the learning rate for step vStep to the optimizer (defined in the .cpp).
    void step(int vStep);
    // Computes the learning rate that applies at step vStep (defined in the .cpp).
    float getLearningRate(int vStep);

private:
    torch::optim::Adam* m_pOpt;  // non-owning; lifetime managed by the caller
    float m_LrInit;              // lr read from the optimizer at construction
    float m_LrFinal;             // lr targeted at/after m_MaxSteps
    int m_MaxSteps;              // horizon (in steps) of the schedule
};
