import torch
import torch_npu
from torch_npu.contrib import transfer_to_npu
torch.npu.set_compile_mode(jit_compile=False)

from lerobot.policies.pi0.modeling_pi0 import PI0Policy
from lerobot.configs.types import FeatureType, PolicyFeature
import time

import numpy as np
import cv2
from mozrobot import MOZ1Robot, MOZ1RobotConfig
from scipy.interpolate import interp1d

"""
本代码无法直接运行，仅作为示例代码，展示如何将PI0模型与千寻SDK对接。
"""


# ---------- Frequency conversion (e.g. 30 Hz -> 120 Hz, linear interpolation) ----------
def resample_action_data(raw_data, raw_freq, target_freq, interp_kind="linear"):
    """Resample a robot action trajectory from ``raw_freq`` to ``target_freq``.

    Linear interpolation (the default) keeps the motion smooth and introduces
    no overshoot, which is what we want for joint commands.

    Args:
        raw_data: array of shape ``(num_points, num_dims)`` — one action per row.
        raw_freq: sampling frequency of ``raw_data`` in Hz.
        target_freq: desired output frequency in Hz.
        interp_kind: interpolation kind forwarded to ``scipy.interpolate.interp1d``.

    Returns:
        Tuple ``(resampled_data, target_time)`` where ``resampled_data`` has
        shape ``(target_points, num_dims)`` and ``target_time`` is the output
        time axis in seconds.
    """
    # Target point count: total duration is preserved, so e.g. 120 Hz from
    # 30 Hz yields 4x as many points.
    raw_points = raw_data.shape[0]
    total_time = raw_points / raw_freq
    target_points = int(total_time * target_freq)

    # endpoint=False gives a uniform step of exactly 1/freq on both axes, so
    # the two timelines stay aligned (no artificial speed-up/slow-down).
    raw_time = np.linspace(0, total_time, raw_points, endpoint=False)
    target_time = np.linspace(0, total_time, target_points, endpoint=False)

    # interp1d interpolates all dimensions at once via axis=0, replacing the
    # original per-dimension Python loop with one vectorized call (identical
    # values). "extrapolate" covers the target samples that land past the last
    # raw sample — endpoint=False leaves up to 1/raw_freq of the target axis
    # beyond raw_time[-1].
    interp_func = interp1d(
        x=raw_time,
        y=raw_data,
        kind=interp_kind,
        axis=0,
        bounds_error=False,
        fill_value="extrapolate",
    )
    resampled_data = interp_func(target_time)

    return resampled_data, target_time

def main():
    """Run the PI0 policy on a MOZ1 robot.

    Loop: capture observation -> preprocess -> infer a 30 Hz action chunk ->
    resample it to the robot control frequency -> stream commands to the robot.
    Runs until interrupted; always disconnects the robot on exit.
    """
    # 1. Load the model (fp16 on the Ascend NPU).
    dtype = torch.float16
    device = torch.device("npu")

    t1 = time.perf_counter()
    # Local model path
    policy = PI0Policy.from_pretrained("/home/huawei/atb_model", local_files_only=True).to(device="npu", dtype=dtype)
    t2 = time.perf_counter()
    print(f"Load model time: {t2 - t1} s.")

    policy.eval()

    # 2. Robot configuration
    config = MOZ1RobotConfig(
        realsense_serials="318122301415, 230322273678, 230322271083",      # TODO: fill in
        structure="wholebody",
        robot_control_hz=120
    )

    # 3. Robot instance
    robot = MOZ1Robot(config)

    # Period between control commands (robot control rate, e.g. 120 Hz).
    dt = 1.0 / robot.control_hz

    # The policy predicts actions at 30 Hz; resample to the control rate.
    raw_freq = 30
    target_freq = robot.control_hz

    try:
        # 4. Connect — fail fast instead of silently driving a disconnected robot.
        if robot.connect():
            print("机器人连接成功！")
        else:
            raise RuntimeError("robot.connect() failed")

        # 5. Enable external control mode (required)
        robot.enable_external_following_mode()

        while True:
            # 6. Current robot state, concatenated as
            # [left 7 joints, left gripper, right 7 joints, right gripper] -> (1, 16).
            obs = robot.capture_observation()
            temp_state = np.concatenate([obs["leftarm_state_joint_pos"], obs["leftarm_gripper_state_pos"], obs["rightarm_state_joint_pos"], obs["rightarm_gripper_state_pos"]], axis=0)
            robot_state = torch.from_numpy(temp_state).to(dtype=dtype, device=device).unsqueeze(0)

            # Image preprocessing: BGR -> RGB, scale to [0, 1] float32.
            for cam in ("cam_high", "cam_left_wrist", "cam_right_wrist"):
                rgb = cv2.cvtColor(obs[cam], cv2.COLOR_BGR2RGB)
                obs[cam] = rgb.astype(np.float32) / 255.0

            # HWC -> CHW, add batch dim, move to NPU.
            image_top = torch.from_numpy(obs["cam_high"]).permute(2, 0, 1).to(dtype=dtype, device=device).unsqueeze(0)
            image_left = torch.from_numpy(obs["cam_left_wrist"]).permute(2, 0, 1).to(dtype=dtype, device=device).unsqueeze(0)
            image_right = torch.from_numpy(obs["cam_right_wrist"]).permute(2, 0, 1).to(dtype=dtype, device=device).unsqueeze(0)

            with torch.inference_mode():
                policy.reset()
                observation = {
                    "observation.images.head": image_top,
                    "observation.images.left_hand": image_left,
                    "observation.images.right_hand": image_right,
                    "observation.state": robot_state,
                    "task": ["Fold the T-shirt."],
                }
                t1 = time.perf_counter()
                action_pi = policy.select_actions(observation)
                t2 = time.perf_counter()
                print(f"Infer time: {(t2 - t1)*1000} ms")
                print("send action to the robot server")

            # Drop the batch dim: (1, chunk, 16) -> (chunk, 16).
            action_list = action_pi.cpu().numpy()[0]

            # Use steps [10, 40) of the predicted chunk.
            # NOTE(review): presumably the first 10 steps are discarded as
            # unreliable/overlapping — confirm against the policy's chunking.
            resampled_data, target_time = resample_action_data(
                raw_data=action_list[10:40],
                raw_freq=raw_freq,
                target_freq=target_freq,
                interp_kind="linear"  # linear suits robot motion: no spurious extrema
            )
            # Stream commands at the control rate; layout mirrors temp_state above.
            for action in resampled_data:
                act = {
                        "leftarm_cmd_joint_pos": np.asarray(action[0:7], dtype=np.float32).tolist(),
                        "leftarm_gripper_cmd_pos": np.asarray(action[7:8], dtype=np.float32).tolist(),
                        "rightarm_cmd_joint_pos": np.asarray(action[8:15], dtype=np.float32).tolist(),
                        "rightarm_gripper_cmd_pos": np.asarray(action[15:16], dtype=np.float32).tolist(),
                    }
                robot.send_action(act)
                time.sleep(dt)

    except Exception as e:
        print(f"错误: {e}")
    finally:
        # 7. Always disconnect, even after an error or Ctrl-C.
        robot.disconnect()

if __name__ == "__main__":
    main()
