# Project RoboOrchard
#
# Copyright (c) 2024-2025 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.

import json
import logging
import os
from io import BytesIO
from datetime import datetime
import numpy as np
import cv2
from flask import Flask, Response, jsonify, request
from gevent.pywsgi import WSGIServer
import deploy_policy  

# --- Debug / deployment switches ---
# When True, replay pre-recorded actions from MOCK_ACTIONS_FILE instead of
# returning the model's output.
ENABLE_MOCK_MODE = True
# .npy file of pre-recorded actions; loaded and normalized to shape (N, 14).
MOCK_ACTIONS_FILE = "mock_actions.npy"
# When True, only the first action frame of each predicted sequence is returned.
OUTPUT_SINGLE_FRAME = True
# When True, skip saving left/right camera debug images (save head cam only).
NO_SAVE_LR = True

app = Flask(__name__)

# Dedicated module logger with its own stream handler so server logs are
# visible regardless of the root logger's configuration.
logger = logging.getLogger("model_infer_server")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)


class ModelInference:
    """Wraps the SEM policy model for the HTTP inference server.

    Responsibilities:
      * load the model once at construction,
      * decode per-request .npy payloads (RGB, depth, joints, intrinsics),
      * save debug images/depths under ``saved_img/<session_timestamp>/``,
      * build the observation dict expected by ``deploy_policy.encode_obs``,
      * run the policy (or replay mock actions when ENABLE_MOCK_MODE).
    """

    # Cameras in the order the arrays are stacked: index 0/1/2.
    _CAM_NAMES = ["left", "head", "right"]

    def __init__(self):
        """Load the policy model and prepare per-session save directories.

        Side effects: creates ``saved_img/<timestamp>/{rgb,depth}/{left,head,right}``
        directories and, when mock mode is enabled, loads MOCK_ACTIONS_FILE.
        """
        usr_args = {
            "ckpt_path": "model.safetensors",
            "config_file": "config_sem_robotwin.py",
        }
        self.model = deploy_policy.get_model(usr_args)

        base_save_dir = os.path.abspath("saved_img")
        os.makedirs(base_save_dir, exist_ok=True)
        session_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        self.session_save_dir = os.path.join(base_save_dir, session_timestamp)
        os.makedirs(self.session_save_dir, exist_ok=True)
        self.rgb_save_dir = os.path.join(self.session_save_dir, "rgb")
        self.depth_save_dir = os.path.join(self.session_save_dir, "depth")
        for cam_name in self._CAM_NAMES:
            os.makedirs(os.path.join(self.rgb_save_dir, cam_name), exist_ok=True)
            os.makedirs(os.path.join(self.depth_save_dir, cam_name), exist_ok=True)
        # Latest head-camera frame, overwritten on every request for quick viewing.
        self.viewport_rgb_path = os.path.join(base_save_dir, "viewport-rgb.png")
        self.frame_counter = 0
        logger.info(f"Created save directory: {os.path.abspath(self.session_save_dir)}")

        self.mock_actions = None
        self.mock_actions_idx = 0
        if ENABLE_MOCK_MODE:
            self._load_mock_actions()
        logger.info("Model loaded successfully.")

    def _load_mock_actions(self):
        """Load pre-recorded actions for replay, normalized to shape (N, D)."""
        if not os.path.exists(MOCK_ACTIONS_FILE):
            logger.warning(f"Mock mode enabled but file not found: {os.path.abspath(MOCK_ACTIONS_FILE)}")
            return
        actions = np.load(MOCK_ACTIONS_FILE)
        if actions.ndim == 1:
            actions = actions.reshape(1, -1)
        elif actions.ndim == 3:
            actions = actions.reshape(-1, actions.shape[-1])
        if actions.ndim != 2:
            # Raise instead of assert: asserts are stripped under ``python -O``.
            raise ValueError(f"Mock actions should be 2D (N, 14), got shape: {actions.shape}")
        self.mock_actions = actions
        logger.info(f"Loaded mock actions from: {os.path.abspath(MOCK_ACTIONS_FILE)}, shape: {self.mock_actions.shape}")

    @staticmethod
    def _read_npy(file_storage, dtype):
        """Decode one uploaded .npy payload into a numpy array of ``dtype``."""
        return np.load(BytesIO(file_storage.read())).astype(dtype)

    def _save_rgb(self, images):
        """Persist per-camera RGB debug frames (head only when NO_SAVE_LR)."""
        for idx, cam_name in enumerate(self._CAM_NAMES):
            if NO_SAVE_LR and cam_name in ["left", "right"]:
                continue
            rgb_file = os.path.join(self.rgb_save_dir, cam_name, f"rgb{self.frame_counter}.png")
            cv2.imwrite(rgb_file, images[idx])
            logger.info(f"Saved {cam_name} rgb: {os.path.abspath(rgb_file)}")
        # Always refresh the single "latest head frame" viewport image.
        cv2.imwrite(self.viewport_rgb_path, images[1])
        logger.info(f"Updated viewport-rgb: {os.path.abspath(self.viewport_rgb_path)}")

    def _save_depth(self, depths):
        """Persist per-camera depth arrays (head only when NO_SAVE_LR)."""
        for idx, cam_name in enumerate(self._CAM_NAMES):
            if NO_SAVE_LR and cam_name in ["left", "right"]:
                continue
            depth_file_npy = os.path.join(self.depth_save_dir, cam_name, f"depth{self.frame_counter}.npy")
            np.save(depth_file_npy, depths[idx])
            logger.info(f"Saved {cam_name} depth npy: {os.path.abspath(depth_file_npy)}")

    @staticmethod
    def _build_observation(images, depths, intrinsics, joint_state):
        """Arrange inputs into the observation structure required by
        ``deploy_policy.encode_obs``.

        Extrinsics are the fixed defaults used by deploy_policy.
        """
        extrinsics = {
            "left_cam": np.array([[1, 0, 0, 0.1], [0, 1, 0, -0.2], [0, 0, 1, 0.5]], dtype=np.float32),
            "middle_cam": np.array([[1, 0, 0, 0.0], [0, 1, 0, 0.0], [0, 0, 1, 0.5]], dtype=np.float32),
            "right_cam": np.array([[1, 0, 0, -0.1], [0, 1, 0, 0.2], [0, 0, 1, 0.5]], dtype=np.float32),
        }
        cams = {}
        for idx, cam_key in enumerate(["left_cam", "middle_cam", "right_cam"]):
            cams[cam_key] = {
                "rgb": images[idx],
                "depth": depths[idx],
                "intrinsic_cv": intrinsics[idx, :3, :3],
                "extrinsic_cv": extrinsics[cam_key],
            }
        return {
            "observation": cams,
            # 14-DoF layout: [left arm(6), left gripper, right arm(6), right gripper].
            "joint_action": {
                "left_arm": joint_state[0:6].tolist(),
                "left_gripper": joint_state[6].item(),
                "right_arm": joint_state[7:13].tolist(),
                "right_gripper": joint_state[13].item(),
            },
        }

    def infer(self, request_data):
        """Run one inference step from a multipart request.

        Args:
            request_data: mapping of field name -> uploaded file (as produced
                by ``flask.request.files``). Array fields are serialized .npy
                payloads; ``instruction`` is UTF-8 text.

        Returns:
            Numpy action array of shape (T, 14) — T == 1 when
            OUTPUT_SINGLE_FRAME — or ``None`` if anything failed (the error
            is logged with a traceback).
        """
        try:
            images = np.stack([
                self._read_npy(request_data["left_rgb"], np.uint8),
                self._read_npy(request_data["middle_rgb"], np.uint8),
                self._read_npy(request_data["right_rgb"], np.uint8),
            ])
            self._save_rgb(images)

            # Depths arrive in millimeters; convert to meters.
            depths = np.stack([
                self._read_npy(request_data["left_depth"], np.float32),
                self._read_npy(request_data["middle_depth"], np.float32),
                self._read_npy(request_data["right_depth"], np.float32),
            ]) / 1000.0
            self._save_depth(depths)
            self.frame_counter += 1

            joint_state = self._read_npy(request_data["joint_states"], np.float32)
            logger.info(f"Using real joint state: {joint_state}")

            intrinsics = np.eye(4, dtype=np.float32)[None].repeat(3, axis=0)
            # Intrinsics received from ROS are 3x4; keep only the 3x3 part.
            for idx, key in enumerate([
                "left_camera_intrinsic",
                "middle_camera_intrinsic",
                "right_camera_intrinsic",
            ]):
                intrinsics[idx, :3, :3] = self._read_npy(request_data[key], np.float32)[:, :3]

            instruction = request_data["instruction"].read().decode("utf-8")
            observation = self._build_observation(images, depths, intrinsics, joint_state)

            if ENABLE_MOCK_MODE and self.mock_actions is not None:
                # Replay recorded actions, cycling through the file. The model
                # is NOT invoked here (the original ran a full inference and
                # then discarded the result).
                mock_action = self.mock_actions[self.mock_actions_idx % len(self.mock_actions)].astype(np.float32)
                self.mock_actions_idx += 1
                actions = mock_action.reshape(1, -1) if mock_action.ndim == 1 else mock_action
                logger.info(f"Using mock actions (idx={self.mock_actions_idx-1}): shape={actions.shape}, content={actions.tolist()}")
            else:
                # Post-process the observation into model input, then run the policy.
                input_data = deploy_policy.encode_obs(observation, self.model.transforms, instruction)
                actions = self.model.get_action(input_data)
                logger.info(f"Using real actions: shape={actions.shape}")

            if OUTPUT_SINGLE_FRAME:
                actions = actions[0:1]
                logger.info(f"Output single frame: shape={actions.shape}")
            return actions

        except Exception as e:
            # Use the module logger (the original mixed in the root logger);
            # .exception records the full traceback.
            logger.exception(f"Error during inference: {e}")
            return None


model_inference = ModelInference()


@app.route("/your_server_name", methods=["POST"])
def model_infer():
    """HTTP entry point: validate the multipart request, run inference, and
    return the predicted action sequence.

    Returns:
        200 with a JSON-encoded (T, 14) action list on success,
        400 when a required multipart field is missing,
        500 when inference fails or an unexpected error occurs.
    """
    try:
        data = request.files
        required_keys = [
            "left_rgb",
            "middle_rgb",
            "right_rgb",
            "left_depth",
            "middle_depth",
            "right_depth",
            "joint_states",
            "instruction",
            "left_camera_intrinsic",
            "middle_camera_intrinsic",
            "right_camera_intrinsic",
        ]
        for key in required_keys:
            if key not in data:
                return jsonify({"error": f"Missing key: {key}"}), 400
        logger.info("Received request for inference.")
        actions = model_inference.infer(data)  # (T, 14) numpy array, or None on failure
        if actions is None:
            # infer() already logged the traceback; without this check,
            # actions.tolist() would raise AttributeError on None and the
            # client would get a confusing error message.
            return jsonify({"error": "Inference failed; see server logs."}), 500
        return Response(
            json.dumps(actions.tolist()),
        )
    except Exception as e:
        # Module logger + .exception keeps the traceback in the server log.
        logger.exception(f"Error in endpoint: {e}")
        return jsonify({"error": str(e)}), 500


if __name__ == "__main__":
    # Serve the Flask app with gevent's WSGI server, listening on every
    # interface at port 2001. serve_forever() blocks until interrupted.
    server = WSGIServer(listener=("", 2001), application=app)
    server.serve_forever()
# cd /home/agilex/Downloads/RoboOrchardLab/projects/sem/robotwin/ros2_ws/robo_orchard_deploy_ros2/example/robo_orchard_deploy/saved_img/20251102_134615 && zip ~/Downloads/sbkt-fp.zip . -r -x "./rgb/left/*" -x "./rgb/right/*" -x "./depth/left/*" -x "./depth/right/*"