# server_predict.py
import base64
import io
import pathlib
import numpy as np
import torch
from fastapi import FastAPI
from pydantic import BaseModel
from typing import Dict, Any

from omegaconf import OmegaConf
import hydra

from diffusion_policies.workspace.train_diffusion_unet_hybrid_pointcloud_workspace import \
    TrainDiffusionUnetHybridPointcloudWorkspace
from diffusion_policies.common.pytorch_util import dict_apply

# ==== Fixed hyperparameters for this deployment ====
DIM_ACTION = 12    # action vector dimensionality (see expected 'agent_pos' shape in predict)
N_POINTS = 1024    # points per point-cloud frame — presumably matches training config; TODO confirm
HORIZON = 8        # planning horizon written into the hydra config below
N_OBS = 2          # number of observation steps fed to the policy
N_ACTIONS = 5      # number of action steps returned per inference
OBS_KEYS = ['point_cloud', 'agent_pos']  # observation keys the policy consumes
CKPT_PATH = "/home/algo/dengweiliang/DemoGen/data/ckpts/pick_test_100-dp3-seed0/checkpoints/1039.ckpt"

# ==== HTTP message bodies ====
class PredictRequest(BaseModel):
    """Request body for /predict."""
    # base64-encoded bytes of an np.savez_compressed(...) archive;
    # decoded by loads_npz_b64 into a dict of numpy arrays
    payload_b64: str

class PredictResponse(BaseModel):
    """Response body for /predict."""
    # base64-encoded np.savez_compressed archive,
    # key 'action' -> array of shape (N_ACTIONS, DIM_ACTION)
    payload_b64: str
    # informational: device the model is served from (e.g. "cuda" or "cpu")
    device: str

# ==== Model loading (the resident process loads the model exactly once) ====
app = FastAPI(title="DP3 Remote Inference Server", version="1.0.0")
# Module-level singletons populated by _load_policy_once() at startup.
policy = None   # loaded policy module, moved to `device` and set to eval mode
device = None   # torch.device the policy lives on

def _load_policy_once():
    """Load the diffusion policy into module-level singletons (idempotent).

    Populates the globals ``policy`` and ``device``; subsequent calls are
    no-ops.

    Raises:
        FileNotFoundError: if CKPT_PATH does not point to an existing file.
    """
    global policy, device
    if policy is not None:
        return

    # Fail fast: validate the checkpoint path BEFORE paying the cost of
    # composing the hydra config and constructing the workspace.
    ckpt_path = pathlib.Path(CKPT_PATH)
    if not ckpt_path.is_file():
        raise FileNotFoundError(f"Checkpoint not found: {ckpt_path}")

    # Compose with hydra using the same default config name as local runs.
    with hydra.initialize_config_dir(version_base=None, config_dir="/home/algo/dengweiliang/DemoGen/diffusion_policies/diffusion_policies/config"):
        cfg = hydra.compose(config_name="dp3")
    OmegaConf.resolve(cfg)
    # Override with this server's fixed hyperparameters.
    cfg.horizon = HORIZON
    cfg.n_obs_steps = N_OBS
    cfg.n_action_steps = N_ACTIONS

    workspace = TrainDiffusionUnetHybridPointcloudWorkspace(cfg)
    workspace.load_checkpoint(path=ckpt_path)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Read workspace.model only AFTER load_checkpoint: if loading reassigns
    # the attribute, a reference captured earlier would hold unloaded weights.
    policy = workspace.model.to(device).eval()
    print(f"[Server] Model loaded on device: {device}")

@app.on_event("startup")
def _on_startup():
    """Eagerly load the policy when the server process starts.

    NOTE(review): FastAPI deprecates ``on_event`` in favor of lifespan
    handlers — consider migrating on the next FastAPI upgrade.
    """
    _load_policy_once()

# ==== Encoding / decoding helpers ====
def loads_npz_b64(b64_str: str) -> Dict[str, np.ndarray]:
    """Decode a base64 string carrying a compressed .npz archive.

    Returns a plain dict mapping each archive entry name to its array.
    """
    archive_bytes = base64.b64decode(b64_str)
    # allow_pickle=False: reject object arrays from untrusted payloads
    with np.load(io.BytesIO(archive_bytes), allow_pickle=False) as archive:
        out = {}
        for name in archive.files:
            out[name] = archive[name]
        return out

def dumps_npz_b64(arrs: Dict[str, np.ndarray]) -> str:
    """Pack named arrays into a compressed .npz archive, base64-encoded."""
    sink = io.BytesIO()
    np.savez_compressed(sink, **arrs)
    encoded = base64.b64encode(sink.getvalue())
    return encoded.decode('utf-8')

# ==== Prediction endpoint ====
@app.post("/predict", response_model=PredictResponse)
@torch.no_grad()
def predict(req: PredictRequest):
    """Run one inference step on a base64-encoded observation payload.

    Expects the decoded .npz archive to contain the keys
    'point_cloud' (N_OBS, N_POINTS, 6) and 'agent_pos' (N_OBS, DIM_ACTION);
    returns a base64-encoded .npz archive with key
    'action' -> (N_ACTIONS, DIM_ACTION).

    Raises:
        KeyError: if any key from OBS_KEYS is missing in the payload.
    """
    _load_policy_once()
    obs_np = loads_npz_b64(req.payload_b64)

    # Validate up front so a malformed request produces an explicit error
    # instead of a bare KeyError deep inside tensor construction.
    missing = [k for k in OBS_KEYS if k not in obs_np]
    if missing:
        raise KeyError(f"payload missing observation keys: {missing}")

    # Build the batched (B=1) observation dict the policy expects.
    obs_in = {
        k: torch.from_numpy(obs_np[k]).to(device=device).unsqueeze(0)
        for k in OBS_KEYS
    }

    action_dict = policy.predict_action(obs_in)  # {'action': (1, N_ACTIONS, DIM_ACTION), ...}
    # Only 'action' goes back to the client — move just that tensor to CPU
    # rather than converting every entry of action_dict.
    actions = action_dict['action'].detach().to('cpu').numpy()
    actions = np.squeeze(actions, axis=0)        # (N_ACTIONS, DIM_ACTION)

    payload_b64 = dumps_npz_b64({'action': actions})
    return PredictResponse(payload_b64=payload_b64, device=str(device))