import warnings
from torch import multiprocessing
from collections import defaultdict
import matplotlib.pyplot as plt
import torch
from tensordict.nn import TensorDictModule
from tensordict.nn.distributions import NormalParamExtractor
from torch import nn
from torchrl.collectors import SyncDataCollector
from torchrl.data.replay_buffers import ReplayBuffer
from torchrl.data.replay_buffers.samplers import SamplerWithoutReplacement
from torchrl.data.replay_buffers.storages import LazyTensorStorage
from torchrl.envs import (
    Compose,
    DoubleToFloat,
    ObservationNorm,
    StepCounter,
    TransformedEnv,
)
from torchrl.envs.libs.gym import GymEnv
from torchrl.envs.utils import check_env_specs, ExplorationType, set_exploration_type
from torchrl.modules import ProbabilisticActor, TanhNormal, ValueOperator
from torchrl.objectives import ClipPPOLoss
from torchrl.objectives.value import GAE
from tqdm import tqdm
import time
import numpy as np
import cv2

# Silence noisy library warnings (gym/torchrl deprecation output).
warnings.filterwarnings("ignore")
# CUDA + "fork" start method is unsafe, so only use the GPU when not forking.
is_fork = multiprocessing.get_start_method() == "fork"
device = (
    torch.device(0)
    if torch.cuda.is_available() and not is_fork
    else torch.device("cpu")
)
# ---- Hyperparameters -----------------------------------------------------
num_cells = 512  # hidden width of the actor/critic MLPs
lr = 1e-4  # Adam learning rate (annealed to 0 by the scheduler below)
max_grad_norm = 0.2  # global gradient-norm clip applied each optimizer step
frames_per_batch = 2048  # frames collected per collector iteration
total_frames = 5_000_000  # total environment frames for the whole run
sub_batch_size = 1024  # minibatch size sampled from the replay buffer
num_epochs = 10  # PPO optimization epochs per collected batch
clip_epsilon = 0.2  # PPO clipping parameter
gamma = 0.99  # discount factor
lmbda = 0.95  # GAE lambda
entropy_eps = 1e-3  # entropy bonus coefficient
# ---- Environment ---------------------------------------------------------
# Humanoid-v4 with pixel frames kept alongside the state vector; the policy
# consumes "observation" while "pixels" are only used for evaluation videos.
base_env = GymEnv(
    "Humanoid-v4",
    from_pixels=True,
    pixels_only=False,
    device=device,
)
env = TransformedEnv(
    base_env,
    Compose(
        ObservationNorm(in_keys=["observation"]),
        DoubleToFloat(),
        StepCounter(),
    ),
)
# Estimate the ObservationNorm loc/scale constants from 200 random steps.
env.transform[0].init_stats(num_iter=200, reduce_dim=0, cat_dim=0)
print("normalization constant shape:", env.transform[0].loc.shape)
print("observation_spec:", env.observation_spec)
print("reward_spec:", env.reward_spec)
print("input_spec:", env.input_spec)
print("action_spec (as defined by input_spec):", env.action_spec)
# Short sanity rollout to confirm the transformed env steps correctly.
rollout = env.rollout(3)
print("rollout of three steps:", rollout)
print("Shape of the rollout TensorDict:", rollout.batch_size)


class BatchNormWithoutBatch(nn.Module):
    """BatchNorm1d wrapper that also accepts inputs without a batch dimension.

    A 1-D input of shape ``(features,)`` is temporarily promoted to
    ``(1, features)``, normalized, and squeezed back. A 2-D input
    ``(batch, features)`` is passed through unchanged. Inputs with more than
    two dimensions are assumed to be ``(batch, features, ...)``: every
    trailing position is treated as an independent sample, normalized, and
    the original shape is restored.

    Bug fix vs. the previous version: the restore path for >2-D inputs viewed
    the flattened ``(B*S, F)`` output as ``(B, F, S)`` instead of
    ``(B, S, F)``, which silently scrambled feature values. The inverse of
    the flattening is now applied correctly.
    """

    def __init__(
        self,
        num_features,
        eps=1e-5,
        momentum=0.1,
        affine=True,
        track_running_stats=True,
    ):
        super().__init__()
        # Delegate the actual normalization to a standard BatchNorm1d.
        self.bn = nn.BatchNorm1d(
            num_features=num_features,
            eps=eps,
            momentum=momentum,
            affine=affine,
            track_running_stats=track_running_stats,
        )
        self.num_features = num_features

    def forward(self, input):
        # Remember the caller's shape so it can be restored at the end.
        original_shape = input.shape

        if len(original_shape) == 1:
            # (features,) -> (1, features)
            input = input.unsqueeze(0)
        elif len(original_shape) == 2:
            # (batch, features): BatchNorm1d's native layout.
            pass
        else:
            # (B, F, ...) -> (B, F, S) -> (B, S, F) -> (B*S, F) so every
            # trailing position becomes its own normalization sample.
            batch_size = original_shape[0]
            features = original_shape[1]
            input = input.view(batch_size, features, -1)
            input = input.transpose(1, 2).contiguous()
            input = input.view(-1, features)

        output = self.bn(input)

        if len(original_shape) == 1:
            # (1, features) -> (features,)
            output = output.squeeze(0)
        elif len(original_shape) > 2:
            # Invert the flattening exactly:
            # (B*S, F) -> (B, S, F) -> (B, F, S) -> original shape.
            batch_size = original_shape[0]
            features = original_shape[1]
            spatial = original_shape[2:].numel()
            output = output.view(batch_size, spatial, features)
            output = output.transpose(1, 2).contiguous()
            output = output.view(original_shape)

        return output

    def extra_repr(self) -> str:
        return (
            f"num_features={self.num_features}, eps={self.bn.eps}, momentum={self.bn.momentum}, "
            f"affine={self.bn.affine}, track_running_stats={self.bn.track_running_stats}"
        )


# ---- Actor network -------------------------------------------------------
# NOTE(review): 376 is presumably the Humanoid-v4 observation size — it is
# hard-coded here; confirm against env.observation_spec if the task changes.
actor_net = nn.Sequential(
    BatchNormWithoutBatch(376).to(device),
    nn.LazyLinear(num_cells, device=device),
    nn.LeakyReLU(),
    nn.LazyLinear(num_cells, device=device),
    nn.LeakyReLU(),
    # 2 * action_dim outputs: split into (loc, scale) by NormalParamExtractor.
    nn.LazyLinear(2 * env.action_spec.shape[-1], device=device),
    NormalParamExtractor(),
)
# Map tensordict keys onto the plain nn.Sequential.
policy_module = TensorDictModule(
    actor_net, in_keys=["observation"], out_keys=["loc", "scale"]
)
# Build a TanhNormal action distribution bounded by the action spec;
# log-probs are recorded for the PPO importance ratio.
policy_module = ProbabilisticActor(
    module=policy_module,
    spec=env.action_spec,
    in_keys=["loc", "scale"],
    distribution_class=TanhNormal,
    distribution_kwargs={
        "low": env.action_spec.space.low,
        "high": env.action_spec.space.high,
    },
    return_log_prob=True,
)
# ---- Critic network ------------------------------------------------------
value_net = nn.Sequential(
    BatchNormWithoutBatch(376).to(device),
    nn.LazyLinear(num_cells, device=device),
    nn.LeakyReLU(),
    nn.LazyLinear(num_cells, device=device),
    nn.LeakyReLU(),
    nn.LazyLinear(1, device=device),
)
value_module = ValueOperator(module=value_net, in_keys=["observation"])
# Run both modules once in eval mode to materialize the LazyLinear shapes.
policy_module.eval()
value_module.eval()
print("Running policy:", policy_module(env.reset()))
print("Running value:", value_module(env.reset()))
# ---- PPO loss, optimizer, scheduler --------------------------------------
loss_module = ClipPPOLoss(
    actor_network=policy_module,
    critic_network=value_module,
    clip_epsilon=clip_epsilon,
    entropy_bonus=bool(entropy_eps),
    entropy_coef=entropy_eps,
    critic_coef=1.0,
    loss_critic_type="smooth_l1",
)
optim = torch.optim.Adam(loss_module.parameters(), lr)
# Cosine-anneal the LR to 0 over the run (one step per collector batch).
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optim, total_frames // frames_per_batch, 0.0
)
# Disabled checkpoint-resume path; change the condition to truthy to resume.
if 0:
    checkpoint = torch.load("model_weights_test_1220.pth")
    policy_module.load_state_dict(checkpoint["policy_weights"])
    value_module.load_state_dict(checkpoint["value_weights"])
# ---- Data collection and replay ------------------------------------------
collector = SyncDataCollector(
    env,
    policy_module,
    frames_per_batch=frames_per_batch,
    total_frames=total_frames,
    split_trajs=False,
    device=device,
)
# Buffer holds exactly one collected batch; sampled without replacement.
replay_buffer = ReplayBuffer(
    storage=LazyTensorStorage(max_size=frames_per_batch),
    sampler=SamplerWithoutReplacement(),
)
advantage_module = GAE(
    gamma=gamma,
    lmbda=lmbda,
    value_network=value_module,
    average_gae=True,
    device=device,
)
save_path = "./ppo_record/"  # expects ./ppo_record/video/ and ./ppo_record/weight/ to exist
base = 0  # filename offset (useful when resuming a previous run)
logs = defaultdict(list)
pbar = tqdm(total=total_frames)
eval_str = ""
policy_module.eval()
value_module.eval()
# ---- Training loop -------------------------------------------------------
# Each collector iteration yields `frames_per_batch` fresh environment frames.
for i, tensordict_data in enumerate(collector):
    # Fail fast before training on NaNs from the env or normalization.
    # (Fixed typo in the first message: "onservation" -> "observation".)
    if tensordict_data["observation"].isnan().any():
        raise RuntimeError("observation中发现NaN！请检查归一化逻辑。")
    if tensordict_data["next", "reward"].isnan().any():
        raise RuntimeError("reward中发现NaN奖励！请检查环境交互逻辑。")

    for _ in range(num_epochs):
        # Freeze batch-norm running-statistics updates while GAE runs the
        # value network over the whole batch, then re-enable them.
        actor_net[0].bn.track_running_stats = False
        value_net[0].bn.track_running_stats = False
        advantage_module(tensordict_data)
        actor_net[0].bn.track_running_stats = True
        value_net[0].bn.track_running_stats = True
        if tensordict_data["advantage"].isnan().any():
            print("advantage中发现NaN！请检查归一化逻辑。")
            break
        # Flatten the batch and refill the (one-batch-sized) replay buffer.
        data_view = tensordict_data.reshape(-1)
        replay_buffer.extend(data_view.cpu())
        policy_module.train()
        value_module.train()
        for _ in range(frames_per_batch // sub_batch_size):
            subdata = replay_buffer.sample(sub_batch_size)
            loss_vals = loss_module(subdata.to(device))
            loss_value = (
                loss_vals["loss_objective"]
                + loss_vals["loss_critic"]
                + loss_vals["loss_entropy"]
            )
            loss_value.backward()
            # Clip the global grad norm before stepping to stabilize updates.
            torch.nn.utils.clip_grad_norm_(loss_module.parameters(), max_grad_norm)
            optim.step()
            optim.zero_grad()
        policy_module.eval()
        value_module.eval()
    # ---- Per-iteration logging ----
    logs["reward"].append(tensordict_data["next", "reward"].mean().item())
    pbar.update(tensordict_data.numel())
    cum_reward_str = (
        f"average reward={logs['reward'][-1]:.4f} (init={logs['reward'][0]:.4f})"
    )
    logs["step_count"].append(tensordict_data["step_count"].max().item())
    stepcount_str = f"step count (max): {logs['step_count'][-1]}"
    logs["lr"].append(optim.param_groups[0]["lr"])
    lr_str = f"lr policy: {logs['lr'][-1]:.4f}"
    # ---- Periodic evaluation, video dump, and checkpointing ----
    if i % 10 == 0:
        env.eval()
        with set_exploration_type(ExplorationType.DETERMINISTIC), torch.no_grad():
            eval_rollout = env.rollout(1000, policy_module)
            if eval_rollout["next", "reward"].isnan().any():
                raise RuntimeError("评估数据中发现NaN奖励！请检查环境或策略输出。")
            if eval_rollout["observation"].isnan().any():
                raise RuntimeError("评估观测中发现NaN！请检查归一化评估模式。")
            logs["eval reward"].append(eval_rollout["next", "reward"].mean().item())
            logs["eval reward (sum)"].append(
                eval_rollout["next", "reward"].sum().item()
            )
            logs["eval step_count"].append(eval_rollout["step_count"].max().item())
            eval_str = (
                f"eval cumulative reward: {logs['eval reward (sum)'][-1]:.4f} "
                f"(init: {logs['eval reward (sum)'][0]:.4f}), "
                f"eval step-count: {logs['eval step_count'][-1]}"
            )

            # Dump the evaluation frames to an mp4. OpenCV expects a
            # (width, height) frame size and BGR channel order.
            ims = eval_rollout["pixels"].cpu().numpy().astype("uint8")
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            out = cv2.VideoWriter(
                save_path + f"video/output_test_{i + base}.mp4",
                fourcc,
                30,
                (ims.shape[2], ims.shape[1]),
            )
            for frame in ims:
                out.write(frame[..., ::-1])  # RGB -> BGR
            out.release()
            del eval_rollout
        env.train()
        torch.save(
            {
                "policy_weights": policy_module.state_dict(),
                "value_weights": value_module.state_dict(),
            },
            save_path + f"weight/model_weights_test_{i + base}.pth",
        )
        # Persist the observation-normalization constants alongside weights.
        # Fixed: both paths previously lacked the f-prefix, so the literal
        # filename "norm_loc_test_{i+base}" was overwritten on every save.
        np.save(
            save_path + f"weight/norm_loc_test_{i + base}", env.transform[0].loc.cpu()
        )
        np.save(
            save_path + f"weight/norm_scale_test_{i + base}",
            env.transform[0].scale.cpu(),
        )
    pbar.set_description(", ".join([eval_str, cum_reward_str, stepcount_str, lr_str]))
    scheduler.step()
