"""
将 StreamClient 封装为 TorchRL 环境
"""
from __future__ import annotations

import torch
import numpy as np
from tensordict import TensorDict
from torchrl.data import BoundedTensorSpec, CompositeSpec, UnboundedContinuousTensorSpec
from torchrl.envs import EnvBase

from parnassus.clients.arm_stream.client import StreamClient


class ArmTorchRLEnv(EnvBase):
    """
    Wrap a ``StreamClient`` as a TorchRL environment.

    The gRPC client is connected lazily on the first :meth:`_reset`, so
    constructing the env does not require a running server.

    Args:
        address: gRPC server address, e.g. ``"localhost:50051"``.
        device: torch device for all tensors produced by the env.
        batch_size: batch size (usually empty, i.e. a single environment).
    """

    def __init__(
        self,
        address: str = "localhost:50051",
        device: str | torch.device = "cpu",
        batch_size: torch.Size | None = None,
    ):
        super().__init__(device=device, batch_size=batch_size or torch.Size([]))

        self.address = address
        # Connected lazily in _reset; None means "not connected / closed".
        self.client: StreamClient | None = None

        # Define observation, action and reward specs.
        # Adjust the hard-coded shapes below to your actual environment.
        self._make_spec()

    def _make_spec(self) -> None:
        """Define the env specs (observation_spec, action_spec, reward_spec)."""

        # 1. Observation space: 4-dimensional unbounded continuous values.
        #    Adjust the shape to your actual observation dimension.
        self.observation_spec = CompositeSpec(
            observation=UnboundedContinuousTensorSpec(
                shape=(4,),
                dtype=torch.float32,
                device=self.device,
            ),
            shape=self.batch_size,
        )

        # 2. Action space: 1-dimensional, bounded to [-1, 1].
        #    Adjust the shape to your actual action dimension.
        self.action_spec = BoundedTensorSpec(
            low=-1.0,
            high=1.0,
            shape=(1,),
            dtype=torch.float32,
            device=self.device,
        )

        # 3. Reward space: scalar reward.
        self.reward_spec = UnboundedContinuousTensorSpec(
            shape=(1,),
            dtype=torch.float32,
            device=self.device,
        )

    def _reset(self, tensordict: TensorDict | None = None, **kwargs) -> TensorDict:
        """
        Reset the environment.

        Args:
            tensordict: optional input TensorDict (may contain a "seed" entry).
            **kwargs: extra arguments (e.g. ``seed``).

        Returns:
            TensorDict holding the initial observation.
        """
        # Connect the client on first use.
        if self.client is None:
            self.client = StreamClient(self.address)
            self.client.connect()

        # Seed precedence: an entry in the input tensordict wins over kwargs.
        seed = kwargs.get("seed", None)
        if tensordict is not None and "seed" in tensordict.keys():
            seed = tensordict["seed"].item()

        # Delegate the reset to the remote environment.
        obs = self.client.reset(seed=seed)

        # as_tensor avoids a copy (and a UserWarning) when the client
        # already returns a torch tensor.
        obs_tensor = torch.as_tensor(obs, dtype=torch.float32, device=self.device)

        return TensorDict(
            {
                "observation": obs_tensor,
            },
            batch_size=self.batch_size,
            device=self.device,
        )

    def _step(self, tensordict: TensorDict) -> TensorDict:
        """
        Advance the environment by one step.

        Args:
            tensordict: TensorDict containing an "action" entry.

        Returns:
            TensorDict with observation, reward, done, terminated, truncated.
        """
        action = tensordict["action"]

        # The gRPC client expects numpy input.
        if isinstance(action, torch.Tensor):
            action_np = action.cpu().numpy()
        else:
            action_np = action

        # reply is assumed to be a mapping with keys observation / reward /
        # terminated / truncated -- TODO confirm against StreamClient.step.
        reply = self.client.step(action_np)

        obs = reply["observation"]
        reward = reply["reward"]
        terminated = reply["terminated"]
        truncated = reply["truncated"]

        obs_tensor = torch.as_tensor(obs, dtype=torch.float32, device=self.device)

        # Reward may arrive as a tensor or a plain scalar.
        if isinstance(reward, torch.Tensor):
            reward_tensor = reward.to(dtype=torch.float32, device=self.device)
        else:
            reward_tensor = torch.tensor([reward], dtype=torch.float32, device=self.device)

        # Done flags may likewise be tensors or plain bools.
        if isinstance(terminated, torch.Tensor):
            terminated_tensor = terminated.to(dtype=torch.bool, device=self.device)
        else:
            terminated_tensor = torch.tensor([terminated], dtype=torch.bool, device=self.device)

        if isinstance(truncated, torch.Tensor):
            truncated_tensor = truncated.to(dtype=torch.bool, device=self.device)
        else:
            truncated_tensor = torch.tensor([truncated], dtype=torch.bool, device=self.device)

        # done = terminated OR truncated (TorchRL convention).
        done_tensor = terminated_tensor | truncated_tensor

        return TensorDict(
            {
                "observation": obs_tensor,
                "reward": reward_tensor,
                "done": done_tensor,
                "terminated": terminated_tensor,
                "truncated": truncated_tensor,
            },
            batch_size=self.batch_size,
            device=self.device,
        )

    def _set_seed(self, seed: int | None):
        """Set the random seed.

        The seed is forwarded to the remote env through ``reset(seed=...)``
        instead, so nothing needs to happen here.
        """
        pass

    def close(self, raise_if_closed=True):
        """Close the environment and release the gRPC client.

        Args:
            raise_if_closed: if True, raise ``RuntimeError`` when the env is
                already closed (TorchRL ``EnvBase.close`` convention).
        """
        if self.client is not None:
            self.client.close()
            self.client.shutdown()
            self.client = None
        elif raise_if_closed:
            raise RuntimeError("Env already closed")

    def __del__(self):
        """Best-effort cleanup; must never raise during garbage collection."""
        try:
            # Bug fix: the previous implementation called close() with the
            # default raise_if_closed=True, so deleting an already-closed
            # (or never-connected) env raised RuntimeError inside __del__.
            self.close(raise_if_closed=False)
        except Exception:
            # __del__ can run during interpreter shutdown with attributes
            # already torn down; swallow everything.
            pass


# ============ 可选:添加环境转换 ============
def make_arm_env(
    address: str = "localhost:50051",
    device: str | torch.device = "cpu",
    from_pixels: bool = False,
) -> EnvBase:
    """
    Factory for a transformed Arm environment.

    Args:
        address: gRPC server address.
        device: torch device.
        from_pixels: use pixel observations (not supported yet).

    Returns:
        The configured, transformed environment.

    Raises:
        NotImplementedError: if ``from_pixels`` is True.
    """
    from torchrl.envs import (
        Compose,
        DoubleToFloat,
        ObservationNorm,
        StepCounter,
        TransformedEnv,
    )

    # Bug fix: this flag was previously accepted and silently ignored,
    # handing callers a state-based env when they asked for pixels.
    if from_pixels:
        raise NotImplementedError("Pixel observations are not supported yet")

    # Base environment.
    env = ArmTorchRLEnv(address=address, device=device)

    # Standard transform stack: normalization, dtype conversion, step counter.
    env = TransformedEnv(
        env,
        Compose(
            ObservationNorm(in_keys=["observation"]),
            DoubleToFloat(),
            StepCounter(),
        ),
    )

    # Collect 1000 samples to initialize the normalization statistics.
    # NOTE(review): this steps the real environment, so it requires a
    # running gRPC server at env-construction time.
    env.transform[0].init_stats(num_iter=1000, reduce_dim=0, cat_dim=0)

    return env