import os
import time

import gymnasium as gym
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import tyro
from setproctitle import setproctitle
from stable_baselines3.common.buffers import ReplayBuffer
from torch.utils.tensorboard import SummaryWriter

from args import Args
from model.meow import FlowPolicy

def make_env(env_id, seed):
    """Return a zero-argument factory that builds one wrapped environment.

    The returned thunk creates the env, records episode statistics, rescales
    actions into [-1, 1], and seeds the action space.
    """
    def thunk():
        environment = gym.make(env_id)
        environment = gym.wrappers.RecordEpisodeStatistics(environment)
        environment = gym.wrappers.RescaleAction(
            environment, min_action=-1.0, max_action=1.0
        )
        environment.action_space.seed(seed)
        return environment

    return thunk

def evaluate(envs, policy, device, deterministic=True):
    """Run one full episode in every parallel eval env and return the mean
    undiscounted episodic return.

    Each env contributes reward only until its own episode first ends;
    `device` is kept in the signature for call-site compatibility.
    """
    with torch.no_grad():
        policy.eval()
        n_envs = envs.unwrapped.num_envs
        episode_return = np.zeros((n_envs,))
        finished = np.zeros((n_envs,), dtype=bool)

        obs, _ = envs.reset(seed=range(n_envs))
        while not finished.all():
            action, _ = policy.sample(
                num_samples=obs.shape[0], obs=obs, deterministic=deterministic
            )
            obs, reward, terminated, truncated, _ = envs.step(
                action.cpu().detach().numpy()
            )
            # Accumulate with the *previous* finished-mask so the step that
            # ends an episode still counts, then fold in the new dones.
            episode_return += reward * (1 - finished)
            finished |= terminated | truncated
    return episode_return.mean()

def train():
    """Train a MEow FlowPolicy with a SAC-style off-policy loop.

    Components: a single-env vector env feeding a replay buffer, a twin-Q
    estimate obtained by duplicating the batch through one forward pass,
    Polyak-averaged target network updates, optional entropy-coefficient
    autotuning, and periodic evaluation with best-checkpoint saving.
    """
    args = tyro.cli(Args)

    # Timestamp (YYYYMMDD-HHMMSS) makes the run name unique.
    time_str = time.strftime("%Y%m%d-%H%M%S")
    run_name = f"MEow-{args.env_id}-{time_str}"
    writer = SummaryWriter(f"runs/{run_name}")
    # torch.save below does not create directories; fail early, not at save time.
    os.makedirs("pts", exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Single-env vector env: keeps a leading batch dim so collected
    # transitions match the replay buffer's batched layout.
    envs = gym.vector.SyncVectorEnv([make_env(args.env_id, args.seed)])
    # Several evaluation envs run in parallel; their returns are averaged.
    test_envs = gym.make_vec(args.env_id, num_envs=args.test_envs_num)
    test_envs = gym.wrappers.RescaleAction(test_envs, -1, 1)

    policy = FlowPolicy(alpha=args.alpha,
                        sigma_max=args.sigma_max,
                        sigma_min=args.sigma_min,
                        action_sizes=envs.action_space.shape[1],
                        state_sizes=envs.observation_space.shape[1],
                        device=device).to(device)
    policy_target = FlowPolicy(alpha=args.alpha,
                        sigma_max=args.sigma_max,
                        sigma_min=args.sigma_min,
                        action_sizes=envs.action_space.shape[1],
                        state_sizes=envs.observation_space.shape[1],
                        device=device).to(device)
    policy_target.load_state_dict(policy.state_dict())
    q_optimizer = optim.Adam(policy.parameters(), lr=args.q_lr)

    # Automatic entropy tuning (SAC-style).
    # NOTE(review): the tuned `alpha` is recomputed every update but never
    # written back into `policy` (built with args.alpha) — confirm FlowPolicy
    # reads it elsewhere, otherwise this branch only trains log_alpha.
    if args.autotune:
        target_entropy = -torch.prod(torch.Tensor(envs.single_action_space.shape).to(device)).item()
        log_alpha = torch.zeros(1, requires_grad=True, device=device)
        alpha = log_alpha.exp().item()
        a_optimizer = optim.Adam([log_alpha], lr=args.q_lr)
    else:
        alpha = args.alpha

    envs.single_observation_space.dtype = np.float32
    rb = ReplayBuffer(
        args.buffer_size,
        envs.single_observation_space,
        envs.single_action_space,
        device,
        handle_timeout_termination=False,
    )

    best_evaluate_return = -np.inf

    obs, _ = envs.reset(seed=args.seed)
    for global_step in range(args.total_timesteps):
        # Warm-up: uniform random actions until the buffer has enough data.
        if global_step < args.learning_starts:
            actions = np.array([envs.single_action_space.sample() for _ in range(envs.num_envs)])
        else:
            policy.eval()
            actions, _ = policy.sample(num_samples=obs.shape[0], obs=obs, deterministic=False)
            actions = actions.detach().cpu().numpy()

        next_obs, rewards, terminations, truncations, infos = envs.step(actions)

        if "final_info" in infos:
            # Entries are None for sub-envs that did not finish this step.
            for info in infos["final_info"]:
                if info is not None and "episode" in info:
                    print(f"Step={global_step}, episodic_return={info['episode']['r']}")
                    writer.add_scalar("charts/episodic_return", info['episode']['r'], global_step)

        # Vector envs auto-reset on episode end, so `next_obs` then holds the
        # *reset* observation of the new episode. Store the true terminal
        # observation instead, so bootstrapping (which matters for truncated
        # episodes) uses the correct next state.
        real_next_obs = next_obs.copy()
        if "final_observation" in infos:
            for idx, final_obs in enumerate(infos["final_observation"]):
                if final_obs is not None:
                    real_next_obs[idx] = final_obs
        rb.add(obs, real_next_obs, actions, rewards, terminations, infos)

        obs = next_obs

        if global_step > args.learning_starts:
            data = rb.sample(args.batch_size)
            with torch.no_grad():
                # Duplicate the batch so a single forward pass yields both
                # heads of the twin-Q value; take the elementwise min
                # (clipped double-Q) for the bootstrap target.
                policy_target.eval()
                v_old = policy_target.get_v(torch.cat((data.next_observations, data.next_observations), dim=0))
                exact_v_old = torch.min(v_old[:v_old.shape[0]//2], v_old[v_old.shape[0]//2:])
                target_q = data.rewards.flatten() + (1-data.dones.flatten()) * args.gamma * (exact_v_old).view(-1)

            policy.train()  # enable dropout for the Q update
            current_q1, _ = policy.get_qv(torch.cat((data.observations, data.observations), dim=0), torch.cat((data.actions, data.actions), dim=0))
            target_q = torch.cat((target_q, target_q), dim=0)
            # Elementwise loss so individual NaN elements can be masked out.
            # (The previous code masked NaNs *after* mse_loss's default mean
            # reduction, so a single NaN zeroed the whole batch loss.)
            qf_loss = F.mse_loss(current_q1.flatten(), target_q.flatten(), reduction="none")
            qf_loss[qf_loss != qf_loss] = 0.0
            qf_loss = qf_loss.mean()

            # Optimize the flow/Q parameters.
            q_optimizer.zero_grad()
            qf_loss.backward()
            if args.grad_clip > 0:
                torch.nn.utils.clip_grad_norm_(policy.parameters(), args.grad_clip)
            q_optimizer.step()

            if args.autotune:
                with torch.no_grad():
                    policy.eval()  # disable dropout when estimating log-probs
                    _, log_pi = policy.sample(num_samples=data.observations.shape[0], obs=data.observations, deterministic=args.deterministic_action)
                alpha_loss = (-log_alpha.exp() * (log_pi + target_entropy)).mean()

                a_optimizer.zero_grad()
                alpha_loss.backward()
                a_optimizer.step()
                alpha = log_alpha.exp().item()

            # Polyak-average the target network toward the online network.
            if global_step % args.target_network_frequency == 0:
                for param, target_param in zip(policy.parameters(), policy_target.parameters()):
                    target_param.data.copy_(args.tau * param.data + (1 - args.tau) * target_param.data)

            # Periodic evaluation; keep the best checkpoint seen so far.
            if global_step % args.evaluate_frequency == 0:
                evaluate_return = evaluate(test_envs, policy, device, deterministic=args.deterministic_action)
                writer.add_scalar("charts/evaluate_return", evaluate_return, global_step)
                if evaluate_return > best_evaluate_return:
                    best_evaluate_return = evaluate_return
                    torch.save(policy, f"pts/{run_name}-best.pt")

    print("Training finished.")
    envs.close()
    test_envs.close()
    writer.close()

if __name__ == '__main__':
    # Rename the process (visible in ps/top/htop) before launching training.
    setproctitle("meow-kxk")
    train()

