#include "lcm_publish.h"

#include <algorithm>
#include <iostream>
#include <valarray>


using namespace std;

// Global policy wrapper, shared between the LCM receive callback and main().
RL_Xdog xdog_rl;

 
int RL_Xdog::init_policy(){
    // Start the RL model: report CUDA availability, load the TorchScript
    // policy, and zero-initialize the 12-DOF action buffers.
    // Returns 0 on success.
    std::cout << "RL model thread start" << std::endl;
    std::cout << "cuda_is_available:" << torch::cuda::is_available() << std::endl;
    std::cout << "cudnn_is_available:" << torch::cuda::cudnn_is_available() << std::endl;

    // Path to the exported TorchScript (jit) policy model.
    // NOTE(review): hard-coded absolute path — consider making it configurable.
    model_path = "/home/saw/C++/sim2sim/model/policy_5.pt";

    // Load the policy network onto the selected device.
    load_policy();

    // Initialize the action buffers to zero for all 12 joints.
    // assign() (instead of push_back in a loop) keeps this idempotent:
    // calling init_policy() twice no longer grows the vectors past 12.
    action.assign(12, 0.0);
    action_temp.assign(12, 0.0);

    return 0;
}

int RL_Xdog::load_policy()
{
    // Load the TorchScript policy from model_path onto the GPU when
    // available (CPU otherwise) and switch it to evaluation mode.
    // Returns 0 on success, -1 if the model file cannot be loaded.
    std::cout << model_path << std::endl;

    // Check the CUDA environment and pick the inference device.
    std::cout << "cuda::is_available():" << torch::cuda::is_available() << std::endl;
    device = torch::kCPU;
    if (torch::cuda::is_available()) {  // removed dead "&& 1" from the condition
        device = torch::kCUDA;
    }
    std::cout << "device:" << device << std::endl;

    // torch::jit::load throws (c10::Error) when the file is missing or
    // corrupt; report the failure instead of crashing with an unhandled
    // exception.
    try {
        model = torch::jit::load(model_path);
    } catch (const std::exception& e) {
        std::cerr << "failed to load model '" << model_path << "': "
                  << e.what() << std::endl;
        return -1;
    }
    std::cout << "model loaded successfully!" << std::endl;

    model.to(device);
    std::cout << "load model to device!" << std::endl;

    model.eval();  // switch to evaluation mode (disables dropout, etc.)

    return 0;
}


void RL_Xdog::handleMessage(const lcm::ReceiveBuffer *rbuf, const std::string &chan,
                       const my_lcm::Request *request)  // robot-state feedback callback
{
    // Build the 45-element observation expected by the policy:
    // 3 (angular velocity) + 3 (gravity orientation) + 3 (velocity commands)
    // + 12 (joint positions) + 12 (joint velocities) + 12 (previous action),
    // run inference, and store the new 12-DOF action in `action`.
    constexpr int kNumJoints = 12;
    constexpr int kObsDim = 45;

    std::vector<float> obs;
    obs.reserve(kObsDim);  // single allocation while filling

    // Scaled base angular velocity (3).
    obs.push_back(request->omega[0] * omega_scale);
    obs.push_back(request->omega[1] * omega_scale);
    obs.push_back(request->omega[2] * omega_scale);

    // Projected gravity vector (3).
    obs.push_back(request->gravity_orientation[0]);
    obs.push_back(request->gravity_orientation[1]);
    obs.push_back(request->gravity_orientation[2]);

    // Velocity commands (3): linear x, linear y, angular yaw.
    obs.push_back(cmd_x * cmd_lin_vel);
    obs.push_back(cmd_y * cmd_lin_vel);
    obs.push_back(cmd_z * cmd_ang_vel);

    // Joint positions relative to the default pose, scaled (12).
    for (int i = 0; i < kNumJoints; ++i){
        obs.push_back((request->q[i] - init_pos[i]) * dof_pos_scale);
    }

    // Scaled joint velocities (12).
    for (int i = 0; i < kNumJoints; ++i){
        obs.push_back(request->dq[i] * dof_vel_scale);
    }

    // Previous action (12).
    for (int i = 0; i < kNumJoints; ++i){
        obs.push_back(action[i]);
    }

    // Inference only: disable autograd bookkeeping for the forward pass.
    torch::NoGradGuard no_grad;

    // Wrap the observation memory as a 1x45 float32 tensor. from_blob does
    // NOT copy; the subsequent .to(device) produces an owning copy, so it is
    // safe even when obs goes out of scope.
    auto options = torch::TensorOptions().dtype(torch::kFloat32);
    torch::Tensor obs_tensor =
        torch::from_blob(obs.data(), {1, kObsDim}, options).to(device);

    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(obs_tensor);

    //---------------------------network inference-----------------------------
    torch::Tensor action_tensor = model.forward(inputs).toTensor();

    // Drop the batch dimension and move the result back to the CPU so the
    // raw data pointer is host-accessible.
    torch::Tensor tensor_cpu = action_tensor.squeeze(0).to(torch::kCPU);
    const float* data_ptr = tensor_cpu.data_ptr<float>();
    const int num_elements = static_cast<int>(tensor_cpu.numel());  // expected: 12

    // Copy the inferred action straight into the shared buffer (no
    // intermediate vector). Clamp the count so a policy that returns fewer
    // than 12 outputs cannot cause an out-of-bounds read (the original
    // indexed 12 elements unconditionally).
    const int count = std::min(num_elements, kNumJoints);
    for (int i = 0; i < count; ++i){
        action[i] = data_ptr[i];
    }
}


int main(int argc, char** argv) {
    // Entry point: initialize LCM, load the policy, then loop
    // publish-latest-action / wait-for-next-observation.

    lcm::LCM lcm;

    if (!lcm.good()) {
        std::cerr << "LCM initialization failed" << std::endl;
        return 1;
    }

    // Load the policy network.
    if (xdog_rl.init_policy() != 0) {
        std::cerr << "policy initialization failed" << std::endl;
        return 1;
    }

    // Subscribe to robot-state observations; handleMessage updates
    // xdog_rl.action on every incoming message.
    lcm.subscribe("LCM_OBS", &RL_Xdog::handleMessage, &xdog_rl);

    while (true) {
        // Publish the most recent action for all 12 joints.
        my_lcm::Response msg;
        for (int i = 0; i < 12; i++) {
            msg.q_exp[i] = xdog_rl.action[i];
        }
        lcm.publish("LCM_ACTION", &msg);

        // Block until the next observation. handle() returns non-zero on
        // error; the original ignored this and would spin forever after an
        // LCM failure — exit cleanly instead.
        if (lcm.handle() != 0) {
            std::cerr << "lcm.handle() failed, exiting" << std::endl;
            break;
        }
    }

    return 0;
}

 


