'''
Unverified.

Training notes:
1. Fixed the missing click-selection effect during gameplay. The training
   score currently oscillates between -4 and -5, for reasons not yet known.
   Plan: train for another 4 hours; if there is still no improvement,
   consider tuning the hyperparameters.

2. Added code for human-demonstration-guided model training; waiting to see
   how it performs.
'''
#!/usr/bin/env python3
import os
import gymnasium as gym
import random
import argparse
import numpy as np
from tensorboardX import SummaryWriter
from PIL import Image
from lib import wob_vnc, model_vnc, common, vnc_demo

import ptan

import torch
import torch.nn.utils as nn_utils
import torch.nn.functional as F
import torch.optim as optim
import cv2

from game import ImageTextGame


# --- Hyperparameters / run configuration ---

REMOTES_COUNT = 8  # NOTE(review): not referenced anywhere in this file — possibly leftover
ENV_NAME = "ImageTextGame-v0"  # default environment id (see --env argument)

GAMMA = 0.99  # discount factor for n-step returns
REWARD_STEPS = 1  # n-step count passed to ExperienceSourceFirstLast
BATCH_SIZE = 128  # experiences accumulated before each optimization step
LEARNING_RATE = 1e-4  # Adam learning rate
ENTROPY_BETA = 0.01  # weight of the entropy bonus in the total loss
CLIP_GRAD = 0.1  # Increased from 0.05 to ensure gradients are not too small

DEMO_PROB = 0.5  # per-batch probability of running a demo-supervised update (rebound to 0.01 later)
CUT_DEMO_PROB_FRAMES = 500000        # After how many frames, demo probability will be dropped to 1%

SAVES_DIR = "saves"  # root directory for checkpoints and best-model snapshots

class TransposeObservation(gym.ObservationWrapper):
    """Transpose the image half of an ``(image, text)`` observation from
    HWC to CHW layout (the channel-first format conv nets in PyTorch expect).

    NOTE(review): ``self.observation_space`` is *not* updated to the
    transposed shape, so it still advertises (H, W, C) while actual
    observations are (C, H, W) — confirm downstream consumers (e.g. the
    model constructor reading ``env.observation_space.shape``) expect this.
    """

    def __init__(self, env=None):
        super().__init__(env)

    def observation(self, observation):
        """Return ``(image.transpose(2, 0, 1), text)``; text passes through."""
        image, text = observation
        return (image.transpose(2, 0, 1), text)
    

class ResizeObservation(gym.ObservationWrapper):
    """Resize the image half of an ``(image, text)`` observation to
    ``new_shape`` and normalize pixel values into [0, 1] as ``float32``.

    Parameters
    ----------
    env : gym.Env
        Wrapped environment producing ``(image, text)`` observations.
    new_shape : tuple
        Target (H, W, C) shape for the image part, default ``(84, 84, 3)``.
    """

    def __init__(self, env, new_shape=(84, 84, 3)):
        super().__init__(env)
        self.new_shape = new_shape
        # Advertise the resized, normalized image space. Note this describes
        # only the image half of the (image, text) tuple observation.
        self.observation_space = gym.spaces.Box(
            low=0, high=1.0, shape=new_shape, dtype=np.float32)
        self.image_count = 0  # counter used only by the save_image debug helper

    def observation(self, observation):
        """Return the resized, [0, 1]-normalized image together with the text."""
        image, text = observation
        # cv2.resize expects (width, height), hence the (W, H) swap.
        resized_image = cv2.resize(image, (self.new_shape[1], self.new_shape[0]),
                                   interpolation=cv2.INTER_AREA)

        # Debugging hook: uncomment to dump each resized frame to disk.
        # self.save_image(resized_image, text)

        # Cast to float32 so the emitted dtype matches the declared
        # observation_space (plain division would yield float64).
        normalized_image = (resized_image / 255.0).astype(np.float32)
        return (normalized_image, text)

    def save_image(self, image, text):
        """Debug helper: write *image* as ``<count>_<text>.png`` in the CWD."""
        self.image_count += 1
        # NOTE(review): *text* is embedded verbatim in the filename — confirm
        # it never contains path separators or other invalid characters.
        filepath = f"{self.image_count}_{text}.png"

        # Convert from BGR (OpenCV convention) to RGB for PIL.
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Save using PIL
        Image.fromarray(image_rgb).save(filepath)
        print(f"Saved image: {filepath}")



if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    # NOTE(review): default=True combined with action='store_true' means
    # --cuda is always True; the flag cannot disable CUDA from the CLI.
    parser.add_argument("--cuda", default=True, action='store_true', help="CUDA mode")
    parser.add_argument("--port-ofs", type=int, default=0, help="Offset for container's ports, default=0")
    parser.add_argument("--env", default=ENV_NAME, help="Environment name to solve, default=" + ENV_NAME)
    # NOTE(review): --demo is parsed but never read below; demo training is
    # gated by the hard-coded demo_samples flag instead.
    parser.add_argument("--demo", help="Demo dir to load. Default=No demo")
    parser.add_argument("--host", default='localhost', help="Host with docker containers")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    # Normalize the environment name into the "wob.mini.*" namespace; it is
    # used only to build the run name / save path below.
    env_name = args.env
    if not env_name.startswith('wob.mini.'):
        env_name = "wob.mini." + env_name

    name = env_name.split('.')[-1] + "_" + args.name
    writer = SummaryWriter(comment="-wob_click_mm_" + name)
    saves_path = os.path.join(SAVES_DIR, name)
    os.makedirs(saves_path, exist_ok=True)

    # Hard-coded switch enabling demonstration-guided training.
    demo_samples = True

    # Main training env plus a separate env used only by train_demo().
    env = TransposeObservation(ResizeObservation(ImageTextGame(image_dir=r"D:\Projects\datasets\gym\wordVision", font_paths=[r"D:\Projects\datasets\gym\font\simsun.ttc"], render_mode="server")))
    demo_env = TransposeObservation(ResizeObservation(ImageTextGame(image_dir=r"D:\Projects\datasets\gym\wordVision", font_paths=[r"D:\Projects\datasets\gym\font\simsun.ttc"], render_mode="server")))

    net = model_vnc.ModelMultimodal(env.observation_space.shape, env.action_space.n).to(device)
    print(net)
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE, eps=1e-8)

    # Preprocessor converts (image, text) observations into model tensors and
    # maintains the text token dictionary (persisted as *.pre files).
    preprocessor = model_vnc.MultimodalPreprocessor(device=device)
    agent = ptan.agent.PolicyAgent(lambda x: net(x)[0], device=device,
                                   apply_softmax=True, preprocessor=preprocessor)
    exp_source = ptan.experience.ExperienceSourceFirstLast(
        [env], agent, gamma=GAMMA, steps_count=REWARD_STEPS)

    start_idx = 0
    # Resume from the newest checkpoint if the saves directory is non-empty.
    if os.path.exists(saves_path) and len(os.listdir(saves_path)) > 0:
        # Checkpoint files presumably look like "a2c_epoch_<idx>.*" (saved via
        # common.save_checkpoints below); sort numerically by <idx>.
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(saves_path)),
                             key=lambda x: int(x.split('_')[2].split('.')[0]))
        checkpoint = torch.load(os.path.join(saves_path, checkpoints[-1]), map_location=device, weights_only=False)
        start_idx = checkpoint['start_idx']
        net.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        preprocessor.load(os.path.join(saves_path, "token_ids") + ".pre")
        print("加载模型成功")  # "Model loaded successfully"

    best_reward = None
    with common.RewardTracker(writer) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            batch = []
            for step_idx, exp in enumerate(exp_source):
                rewards_steps = exp_source.pop_rewards_steps()
                if rewards_steps:
                    rewards, steps = zip(*rewards_steps)
                    tb_tracker.track("episode_steps", np.mean(steps), step_idx)

                    # tracker.reward presumably returns a smoothed mean reward,
                    # or None until enough episodes accumulate — verify in common.
                    mean_reward = tracker.reward(np.mean(rewards), step_idx)
                    if mean_reward is not None:
                        if best_reward is None or mean_reward > best_reward:
                            if best_reward is not None:
                                # Snapshot the new best model + preprocessor.
                                # NOTE(review): rebinds `name` (the run name);
                                # harmless here since saves_path was built above.
                                name = "best_%.3f_%d" % (mean_reward, step_idx)
                                fname = os.path.join(saves_path, name)
                                torch.save(net.state_dict(), fname + ".dat")
                                preprocessor.save(fname + ".pre")
                                print("Best reward updated: %.3f -> %.3f" % (best_reward, mean_reward))
                            best_reward = mean_reward
                            preprocessor.save(os.path.join(saves_path, "token_ids") + ".pre")
                        checkpoint = {
                            "net": net.state_dict(),
                            "optimizer": optimizer.state_dict(),
                            # NOTE(review): stores the *resumed* start_idx, not
                            # start_idx + step_idx — after a restart, progress
                            # made in this run is not reflected on the next
                            # resume. TODO confirm against save_checkpoints.
                            "start_idx": start_idx,
                        }
                        common.save_checkpoints(start_idx + step_idx, checkpoint, saves_path, "a2c", keep_last=5)
                batch.append(exp)
                if len(batch) < BATCH_SIZE:
                    continue

                # Decay demo usage to 1% after enough frames. Rebinding the
                # module-level DEMO_PROB works because this code runs at
                # module scope (no function body involved).
                if step_idx > CUT_DEMO_PROB_FRAMES:
                    DEMO_PROB = 0.01
                if demo_samples and random.random() < DEMO_PROB:
                    # Supervised update driven by human demonstrations.
                    model_vnc.train_demo(net, optimizer, demo_env, writer, step_idx,
                                         preprocessor=preprocessor,
                                         device=device)

                states_v, actions_t, vals_ref_v = \
                    common.unpack_batch(batch, net, last_val_gamma=GAMMA ** REWARD_STEPS,
                                        device=device, states_preprocessor=preprocessor)
                batch.clear()

                # --- A2C update: critic MSE + policy gradient + entropy bonus ---
                optimizer.zero_grad()
                logits_v, value_v = net(states_v)

                loss_value_v = F.mse_loss(value_v.squeeze(-1), vals_ref_v)

                log_prob_v = F.log_softmax(logits_v, dim=1)
                # Advantage uses detached values so the policy term does not
                # backpropagate through the critic head.
                adv_v = vals_ref_v - value_v.detach()
                log_prob_actions_v = adv_v * log_prob_v[range(BATCH_SIZE), actions_t]
                loss_policy_v = -log_prob_actions_v.mean()

                prob_v = F.softmax(logits_v, dim=1)
                # Scaled negative entropy: minimizing it maximizes policy entropy.
                entropy_loss_v = ENTROPY_BETA * (prob_v * log_prob_v).sum(dim=1).mean()

                loss_v = entropy_loss_v + loss_value_v + loss_policy_v
                loss_v.backward()
                nn_utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
                optimizer.step()

                tb_tracker.track("advantage", adv_v, step_idx)
                tb_tracker.track("values", value_v, step_idx)
                tb_tracker.track("batch_rewards", vals_ref_v, step_idx)
                tb_tracker.track("loss_entropy", entropy_loss_v, step_idx)
                tb_tracker.track("loss_policy", loss_policy_v, step_idx)
                tb_tracker.track("loss_value", loss_value_v, step_idx)
                tb_tracker.track("loss_total", loss_v, step_idx)
                tb_tracker.track("dict_size", len(preprocessor), step_idx)

    pass
