#!/usr/bin/env python3
'''
Play / evaluate a TRPO-trained policy on the ImageTextGame environment.

Loads a saved actor network and runs one episode, printing the total
reward and number of steps.
'''
import gymnasium as gym
import argparse
import cv2
import os

from lib import model
from PIL import Image

import numpy as np
import torch
from game import ImageTextGame

ENV_ID = "ImageTextGame-v0"
class TransposeObservation(gym.ObservationWrapper):
    """Reorder the image half of an (image, text) observation from
    (H, W, C) to (C, H, W), the channel-first layout conv nets expect.

    The text component is passed through untouched.
    """

    def __init__(self, env=None):
        super().__init__(env)

    def observation(self, observation):
        """Return (image transposed to CHW, text unchanged)."""
        image, text = observation
        return (image.transpose(2, 0, 1), text)


class ResizeObservation(gym.ObservationWrapper):
    """Resize the image half of an (image, text) observation to a fixed
    shape and scale pixel values into [0, 1].

    NOTE(review): ``observation_space`` is declared as a Box over the image
    only, while ``observation()`` actually returns an (image, text) tuple —
    the text component is passed through unchanged.
    """

    def __init__(self, env, new_shape=(84, 84, 3)):
        super(ResizeObservation, self).__init__(env)
        self.new_shape = new_shape  # target (H, W, C)
        self.observation_space = gym.spaces.Box(
            low=0, high=1.0, shape=new_shape, dtype=np.float32)
        self.image_count = 0  # counter used to name debug snapshots

    def observation(self, observation):
        """Return (resized, normalized float32 image, text unchanged)."""
        image, text = observation
        # cv2.resize takes (width, height), i.e. axes reversed vs. new_shape.
        resized_image = cv2.resize(image, (self.new_shape[1], self.new_shape[0]),
                                   interpolation=cv2.INTER_AREA)

        # Uncomment to dump the raw (pre-normalization) frames to disk.
        # self.save_image(resized_image, text)

        # Cast explicitly so the returned dtype matches the declared
        # observation_space dtype (plain `/ 255.0` would yield float64).
        normalized_image = resized_image.astype(np.float32) / 255.0
        return (normalized_image, text)

    def save_image(self, image, text):
        """Debug helper: save *image* to the CWD as '<count>_<text>.png'."""
        self.image_count += 1
        filepath = f"{self.image_count}_{text}.png"

        # cv2 works in BGR order; PIL expects RGB, so convert before saving.
        # NOTE(review): assumes the env produces BGR frames — verify upstream.
        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Save using PIL
        Image.fromarray(image_rgb).save(filepath)
        print(f"Saved image: {filepath}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", required=True, help="Model file to load")
    parser.add_argument("-e", "--env", default=ENV_ID, help="Environment name to use, default=" + ENV_ID)
    parser.add_argument("-r", "--record", help="If specified, sets the recording dir, default=Disabled")
    args = parser.parse_args()

    # NOTE(review): args.env and args.record are parsed but never used below —
    # the environment is constructed directly and no recording wrapper is applied.
    env = TransposeObservation(ResizeObservation(ImageTextGame(image_dir=r"D:\Projects\datasets\gym\wordVision",
                                                               font_paths=[r"D:\Projects\datasets\gym\font\simsun.ttc"],
                                                               render_mode="human")))

    net = model.ModelActor(env.observation_space.shape[0], env.action_space.shape[0])
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    net.load_state_dict(torch.load(args.model, map_location="cpu"))
    net.eval()  # inference only — disable training-mode layers (dropout, etc.)

    obs, _ = env.reset()
    total_reward = 0.0
    total_steps = 0
    while True:
        obs_v = torch.FloatTensor(obs)
        # No gradients needed for rollout; avoids building autograd graphs.
        with torch.no_grad():
            mu_v = net(obs_v)
        # Deterministic policy: use the mean action, clipped to the valid range.
        action = mu_v.squeeze(dim=0).data.numpy()
        action = np.clip(action, -1, 1)
        obs, reward, done, trunc, _ = env.step(action)
        done = done or trunc  # gymnasium splits termination and truncation
        total_reward += reward
        total_steps += 1
        if done:
            break
    print("In %d steps we got %.3f reward" % (total_steps, total_reward))
