#!/usr/bin/env python3
'''
Not yet adapted (work in progress).
'''
import gymnasium as gym
import ptan
import numpy as np
import argparse
from tensorboardX import SummaryWriter
import os

import torch
import torch.nn as nn
import torch.nn.utils as nn_utils
import torch.nn.functional as F
import torch.optim as optim

from typing import Any
from lib import common
from collections import deque
import ale_py
from minio import Minio
from minio.error import S3Error
from datetime import timedelta
from PIL import Image
import os
from openai import OpenAI
import time

gym.register_envs(ale_py)
GAMMA = 0.99
LEARNING_RATE = 5e-4
ENTROPY_BETA = 0.01
BATCH_SIZE = 128
NUM_ENVS = 50

REWARD_STEPS = 4
CLIP_GRAD = 0.5

SAVE_ITERS = 100


def _group_list(items, lens):
    """
    Unflat the list of items by lens
    反平铺队列，也就是将原先list 一维的数据，跟进lens进行分割，实现二维的队列
    [...] => [[.], [[.], [.]], .]
    :param items: list of items
    :param lens: list of integers
    :return: list of list of items grouped by lengths
    """
    res = []
    cur_ofs = 0
    for g_len in lens:
        res.append(items[cur_ofs:cur_ofs+g_len])
        cur_ofs += g_len
    return res


class ExperienceSourceRAW:
    """
    Simple n-step experience source using single or multiple environments

    Every experience yielded is a tuple of up to ``steps_count`` transition
    entries ``(state, action, reward, is_done, next_state, agent_state)``.
    """
    def __init__(self, env, agent, steps_count=2, steps_delta=1, vectorized=False):
        """
        Create simple experience source
        :param env: environment or list of environments to be used
        :param agent: callable to convert batch of states into actions to take
        :param steps_count: count of steps to track for every experience chain
        :param steps_delta: how many iterations to skip between emitted experience items
        :param vectorized: support of vectorized envs from OpenAI universe
        """
        # Validate argument types before storing them on the instance
        assert isinstance(env, (gym.Env, gym.vector.VectorEnv, list, tuple))
        assert isinstance(agent, ptan.agent.BaseAgent)
        assert isinstance(steps_count, int)
        assert steps_count >= 1
        assert isinstance(vectorized, bool)
        # self.pool: list of environments to sample from (singletons wrapped)
        if isinstance(env, (list, tuple)):
            self.pool = env
        else:
            self.pool = [env]
        self.agent = agent
        self.steps_count = steps_count
        self.steps_delta = steps_delta
        self.total_rewards = []
        self.total_steps = []
        self.vectorized = vectorized

    def __iter__(self):
        states, agent_states, histories, cur_rewards, cur_steps = [], [], [], [], []
        env_lens = []
        for env in self.pool:
            obs, obs_info = env.reset()
            if self.vectorized:
                obs_len = len(obs)
                states.extend(obs)
            else:
                obs_len = 1
                states.append(obs)
            env_lens.append(obs_len)

            # One bookkeeping slot per observation produced by this env
            for _ in range(obs_len):
                histories.append(deque(maxlen=self.steps_count)) # rolling window of the last steps_count transitions
                cur_rewards.append(0.0)  # reward accumulated in the current episode
                cur_steps.append(0) # steps taken in the current episode
                agent_states.append(self.agent.initial_state()) # freshly reset envs use the agent's initial state

        # Main loop: from here on the environments are actually played
        iter_idx = 0
        while True:
            actions = [None] * len(states) # action chosen for every tracked observation slot
            states_input = []
            states_indices = []
            # Collect the states that need an action from the agent.
            # For non-vectorized envs each idx maps to a single env; for
            # vectorized envs idx indexes into the flattened observation list.
            for idx, state in enumerate(states):
                if state is None:
                    # No state in this slot (vectorized env just finished an
                    # episode): sample a random action instead. All envs are
                    # assumed to share the same action space, so env 0 is used.
                    actions[idx] = self.pool[0].action_space.sample()  # assume that all envs are from the same family
                else:
                    # Remember the state and the slot it came from so the
                    # agent's answers can be scattered back correctly
                    states_input.append(state)
                    states_indices.append(idx)
            if states_input:
                # Ask the agent for actions for every pending state
                # (agent_states is passed through; the agent may ignore it)
                states_actions, new_agent_states = self.agent(states_input, agent_states)
                # Scatter the chosen actions back to their original slots
                for idx, action in enumerate(states_actions):
                    # Slot index recorded above when building states_input
                    g_idx = states_indices[idx]
                    actions[g_idx] = action
                    # Updated per-slot agent state returned by the agent
                    agent_states[g_idx] = new_agent_states[idx]
            # Re-group the flat action list back into one list per environment
            grouped_actions = _group_list(actions, env_lens)

            # global_ofs tracks the first slot index of the current env
            global_ofs = 0
            # Apply the chosen actions to each environment
            for env_idx, (env, action_n) in enumerate(zip(self.pool, grouped_actions)):
                if self.vectorized:
                    # Vectorized env: step with the whole action list at once
                    next_state_n, r_n, is_done_n, truncated, _ = env.step(action_n)
                    is_done_n = np.logical_or(is_done_n, truncated)
                else:
                    # Plain env: step with the single action (action_n is a
                    # one-element list for uniformity with vectorized envs)
                    next_state, r, is_done, truncated, _ = env.step(action_n[0])
                    is_done = is_done or truncated
                    # Wrap results in lists to match the vectorized layout
                    next_state_n, r_n, is_done_n = [next_state], [r], [is_done]

                # Process each transition produced by this env
                for ofs, (action, next_state, r, is_done) in enumerate(zip(action_n, next_state_n, r_n, is_done_n)):
                    # Index of this transition's bookkeeping slot
                    idx = global_ofs + ofs
                    # State the action was chosen from
                    state = states[idx]
                    # History buffer for this slot (empty right after reset)
                    history = histories[idx]
                    agent_state = agent_states[idx]

                    # Accumulate per-episode statistics for this slot
                    cur_rewards[idx] += r
                    # Count the step taken in this episode
                    cur_steps[idx] += 1
                    # Only record the transition when we had a real state;
                    # each sample chain lives in this per-slot deque
                    if state is not None:
                        history.append((state, action, r, is_done, next_state, agent_state))
                    # Once the window is full (and the iteration matches the
                    # delta), emit the n-step chain; generation resumes here
                    # the next time the consumer pulls an item
                    if len(history) == self.steps_count and iter_idx % self.steps_delta == 0:
                        yield tuple(history)
                    # Advance this slot to the post-action state
                    states[idx] = next_state
                    if is_done:
                        # Episode finished: for episodes shorter than
                        # steps_count, emit whatever was gathered
                        # in case of very short episode (shorter than our steps count), send gathered history
                        if 0 < len(history) < self.steps_count:
                            yield tuple(history)
                        # generate tail of history
                        # Drain the remaining shorter tail chains one by one
                        while len(history) > 1:
                            history.popleft()
                            yield tuple(history)
                        # Record the finished episode's totals
                        self.total_rewards.append(cur_rewards[idx])
                        self.total_steps.append(cur_steps[idx])
                        # Reset per-episode counters
                        cur_rewards[idx] = 0.0
                        cur_steps[idx] = 0
                        # vectorized envs are reset automatically
                        states[idx] = env.reset()[0] if not self.vectorized else None
                        agent_states[idx] = self.agent.initial_state()
                        history.clear()

                # Move the base offset to the next env's first slot
                global_ofs += len(action_n)
            # Advance the iteration counter used with steps_delta
            iter_idx += 1

    def pop_total_rewards(self):
        """
        Return the rewards of finished episodes and clear the buffers.
        """
        r = self.total_rewards
        if r:
            self.total_rewards = []
            self.total_steps = []
        return r

    def pop_rewards_steps(self):
        # Return (reward, steps) pairs for finished episodes, clearing buffers
        res = list(zip(self.total_rewards, self.total_steps))
        if res:
            self.total_rewards, self.total_steps = [], []
        return res


class TransposeObservation(gym.ObservationWrapper):
    """Observation wrapper that reorders image frames from HWC to CHW layout."""

    def __init__(self, env=None):
        super().__init__(env)

    def observation(self, observation):
        # Move the channel axis to the front: (H, W, C) -> (C, H, W)
        return observation.transpose(2, 0, 1)


class FireResetEnv(gym.Wrapper):
    """
    Wrapper for games that need FIRE (action 1) pressed to start.

    ``reset()`` presses a couple of candidate "start" actions after the
    underlying reset so the episode is actually running when control
    returns to the caller.
    """
    def __init__(self, env=None):
        """For environments where the user need to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        # Such games expose FIRE as action 1 and have at least 3 actions
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3

    def step(self, action):
        # Plain pass-through; only reset() behaviour is customised
        return self.env.step(action)

    def reset(self, seed: int | None = None, options: dict[str, Any] | None = None):
        # We do not know which action actually starts the game, so try the
        # first few; if one of them accidentally ends the episode, reset again.
        self.env.reset(seed=seed, options=options)
        obs, _, done, trunc, info = self.env.step(1)
        if done or trunc:
            # BUG FIX: capture the fresh post-reset observation (the original
            # discarded reset()'s return value and kept the terminal obs)
            obs, info = self.env.reset(seed=seed, options=options)
        obs, _, done, trunc, info = self.env.step(2)
        if done or trunc:
            # BUG FIX: same as above — return the post-reset obs/info rather
            # than the stale terminal pair from step(2)
            obs, info = self.env.reset(seed=seed, options=options)
        return obs, info

class AtariA2C(object):
    """
    Action "policy" backed by a Qwen vision-language model.

    Each call converts the current observation to a PNG, uploads it to a
    MinIO bucket, and asks the Qwen chat-completions endpoint (via a
    presigned URL plus the recent frame/response history and the last
    reward) which action index to take.  The constructor keeps the
    ``(input_shape, n_actions)`` signature of a network so the rest of the
    training script does not need to change.
    """

    def __init__(self, input_shape, n_actions):
        # NOTE(security): credentials and the API key are hardcoded below;
        # move them to environment variables / a secrets store before
        # sharing or deploying this code.
        self.minio_url = "http://passk.chchy.vip:19013"
        self.access_key = "BglrQkwe6NFpiC7SAONk"
        self.secret_key = "6LwBnBB61KQ5CNkwYDSPlPOxvZQkSVIxE84953hB"
        self.bucket_name = "qwen-bucket"

        # MinIO client used for uploading frames and minting presigned URLs
        self.client = Minio(
            endpoint=self.minio_url.replace("http://", "").replace("https://", ""),
            access_key=self.access_key,
            secret_key=self.secret_key,
            secure=False  # set to True when the endpoint uses HTTPS
            )

        # Sliding window of the 4 most recent frames shown to the model
        self.img_queue = deque(maxlen=4)

        os.environ['DASHSCOPE_API_KEY'] = "sk-3b8a5b48191b4399b246f872b2b4ef90"
        self.qw_client = OpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )

    def __call__(self, states, last_rewards = None):
        # Local imports keep the class importable without changing the
        # module-level import block; BUG FIX: the original imported hashlib
        # *after* the line that used it, raising NameError at runtime.
        from io import BytesIO
        import hashlib

        new_state_img = AtariA2C.convert_state_as_image(states)
        # Keep at most 4 frames in flight: evict the oldest from MinIO first
        if len(self.img_queue) >= 4:
            first_img = self.img_queue[0]
            self.client.remove_object(self.bucket_name, first_img['name'])
        self.img_queue.append({
            # BUG FIX: md5 requires bytes, not a PIL Image — hash raw pixels
            'name': hashlib.md5(new_state_img.tobytes()).hexdigest() + ".png",
            'data': new_state_img
        })

        # Serialize the newest frame to PNG and upload it
        img_byte_arr = BytesIO()
        self.img_queue[-1]['data'].save(img_byte_arr, format='PNG')
        img_byte_arr.seek(0)

        self.client.put_object(self.bucket_name, self.img_queue[-1]['name'], img_byte_arr, length=img_byte_arr.getbuffer().nbytes)
        # Short-lived presigned URL the model fetches the image from
        url = self.client.presigned_get_object(self.bucket_name, self.img_queue[-1]['name'], expires=timedelta(seconds=60))
        self.img_queue[-1]['url'] = url

        messages = []
        messages.append({
            "role": "system",
            "content": [
                {"type": "text", "text": "下列图片是atari casino游戏的游戏状态，你需要根据图片的内容来进行决策，你的决策动作有0：NOOP；1：FIRE；2：UP；3：DOWN(GYM中的动作编号)，请根据图片游戏状态、动作、以及奖励反馈来进行动作决策。只需要回答的动作编号数字，其他的不要回答。"}
            ]
        })

        # BUG FIX: identity comparison with None (`is not None`), not `!=`
        if last_rewards is not None:
            messages.append({
                "role": "user",
                "content": [
                    {"type": "text", "text": "上一次的决策动作的奖励为：{}".format(last_rewards)}
                ]
            })

        # Replay the frame history, attaching the model's earlier reply where
        # available. BUG FIX: the original appended an empty dict {} as a
        # content part, which is not a valid chat-completions content item.
        for img_info in self.img_queue:
            content = [{"type": "image_url", "image_url": {"url": img_info['url']}}]
            if 'response' in img_info:
                content.append({"type": "text", "text": img_info['response']})
            messages.append({"role": "user", "content": content})

        completion = self.qw_client.chat.completions.create(
            model='qwen-vl-plus',
            messages=messages)

        # Remember the raw reply so it can provide context on the next call
        self.img_queue[-1]['response'] = completion.choices[0].message.content
        print("response:", self.img_queue[-1]['response'])

        # NOTE(review): assumes the model replies with a bare digit; any
        # extra text makes int() raise ValueError — consider defensive parsing.
        return int(completion.choices[0].message.content)

    @staticmethod
    def convert_state_as_image(state):
        """
        Convert an observation array into a PIL image.

        Float inputs are min-max scaled into [0, 255] and cast to uint8;
        singleton axes are squeezed; modes other than 'L'/'RGB' are
        converted to grayscale.
        :param state: numpy array holding the raw observation
        :return: PIL.Image instance
        """
        # Ensure the state is a NumPy array with dtype uint8
        if state.dtype != np.uint8:
            lo, hi = state.min(), state.max()
            if hi > lo:
                # Scale floats to [0, 255] and convert to uint8
                state = np.uint8(255 * (state - lo) / (hi - lo))
            else:
                # BUG FIX: constant image — avoid division by zero
                state = np.zeros(state.shape, dtype=np.uint8)
        # Remove extra singleton dimensions if necessary
        state = state.squeeze()
        img = Image.fromarray(state)
        # Convert to grayscale if the mode is not directly usable
        if img.mode not in ('L', 'RGB'):
            img = img.convert('L')
        # BUG FIX: the original fell off the end and implicitly returned None
        return img


def unpack_batch(batch, net, device='cpu'):
    """
    Convert a batch of experience entries into training tensors.

    :param batch: sequence of experiences exposing ``state``, ``action``,
        ``reward`` and ``last_state`` (``last_state`` is None when the
        episode ended inside the experience chain)
    :param net: network used to bootstrap values of non-terminal tail states
    :param device: device string the produced tensors are moved to
    :return: (states tensor, actions tensor, reference values tensor)
    """
    states, actions, rewards = [], [], []
    # Batch positions of non-terminal entries plus their tail states; only
    # those receive a bootstrapped value on top of the collected reward.
    not_done_idx, last_states = [], []
    for pos, exp in enumerate(batch):
        states.append(np.asarray(exp.state))
        actions.append(int(exp.action))
        rewards.append(exp.reward)
        if exp.last_state is not None:
            not_done_idx.append(pos)
            last_states.append(np.asarray(exp.last_state))

    states_v = torch.FloatTensor(np.asarray(states)).to(device)
    actions_t = torch.LongTensor(actions).to(device)
    rewards_np = np.array(rewards, dtype=np.float32)
    if not_done_idx:
        # Bootstrap non-terminal entries with V(tail) discounted over the
        # n-step horizon.
        tail_states_v = torch.FloatTensor(np.asarray(last_states)).to(device)
        tail_vals_np = net(tail_states_v)[1].data.cpu().numpy()[:, 0]
        rewards_np[not_done_idx] += GAMMA ** REWARD_STEPS * tail_vals_np

    ref_vals_v = torch.FloatTensor(rewards_np).to(device)
    return states_v, actions_t, ref_vals_v


def wrap_dqn(env, stack_frames=4, episodic_life=True, reward_clipping=True):
    """Placeholder wrapper chain: all options are ignored and *env* is returned unchanged."""
    return env


def test_model(env, net, device, episodes=5):
    """
    Run greedy evaluation episodes and return the average total reward.

    An episode is aborted early when the policy emits the NOOP action (0)
    more than 30 times in a row, which avoids infinite stalls.

    :param env: environment to evaluate in
    :param net: network returning (logits, value) for a batch of states
    :param device: torch device the observations are moved to
    :param episodes: number of evaluation episodes to average over
    :return: mean episode reward
    """
    reward_sum = 0.0
    for _ in range(episodes):
        consecutive_noops = 0
        last_action = -1
        obs, _ = env.reset()
        while True:
            obs_v = ptan.agent.default_states_preprocessor([obs]).to(device)
            logits_v, _ = net(obs_v)
            # Greedy action: argmax over the softmaxed policy logits
            probs = F.softmax(logits_v, dim=1).data.cpu().numpy()
            action = np.argmax(probs)
            if action == 0 and last_action == action:  # Noop
                consecutive_noops += 1
                if consecutive_noops > 30:
                    break
            else:
                consecutive_noops = 0
            last_action = action
            obs, reward, done, trunc, _ = env.step(action)
            reward_sum += reward
            if done or trunc:
                break
    return reward_sum / episodes


def optimized_states_preprocessor(states):
    """
    Convert a list of environment states into a batched torch tensor.

    A single-state list is given a leading batch axis directly, skipping the
    per-element copy the general path performs.

    :param states: list of numpy arrays with states
    :return: torch.Tensor with a leading batch dimension
    """
    if len(states) == 1:
        batch = np.expand_dims(states[0], 0)
    else:
        batch = np.asarray([np.asarray(item) for item in states])
    return torch.from_numpy(batch)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", default="breakout", required=False, help="Name of the run")
    args = parser.parse_args()

    save_path = os.path.join("saves", "qwen-" + args.name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # NOTE(review): wrap_dqn is currently a no-op stub, so no frame stacking /
    # episodic-life / reward clipping is actually applied here.
    envs = wrap_dqn(gym.make("ALE/Carnival-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    test_env = wrap_dqn(gym.make("ALE/Carnival-v5", obs_type='rgb', frameskip=4, repeat_action_probability=0.0), episodic_life=False)
    writer = SummaryWriter(comment="-a2c_" + args.name)

    # NOTE(review): `envs` is a single Env, not a list — `envs[0]` will fail.
    # Presumably this should build a list of NUM_ENVS environments; confirm.
    net = AtariA2C(envs[0].observation_space.shape, envs[0].action_space.n)
    print(net)

    agent = ptan.agent.PolicyAgent(lambda x: net(x)[0], apply_softmax=True, preprocessor=optimized_states_preprocessor)
    # NOTE(review): ExperienceSourceRAW.__init__ accepts no `gamma` kwarg —
    # this call raises TypeError as written.
    exp_source = ExperienceSourceRAW(envs, agent, gamma=GAMMA, steps_count=1)
    # NOTE(review): AtariA2C is a plain object, not an nn.Module — it has no
    # parameters()/state_dict()/load_state_dict(), so the optimizer and the
    # checkpoint code below cannot work until a real network is plugged in
    # (consistent with the file header saying "not yet adapted").
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE, eps=1e-3)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10000, gamma=0.9)

    batch = []
    best_reward = 0
    frame_idx = 0

    if os.path.exists(save_path) and len(os.listdir(save_path)) > 0:
        # Resume from the newest checkpoint, sorted by the epoch number
        # embedded in the filename (assumed "<prefix>_epoch_<N>.<ext>" —
        # verify against common.save_checkpoints' naming).
        checkpoints = sorted(filter(lambda x: "epoch" in x, os.listdir(save_path)), key=lambda x: int(x.split('_')[2].split('.')[0]))
        # NOTE(review): `device` is never defined in this script — this line
        # and the uses below raise NameError; define it (e.g. from CUDA
        # availability) before running.
        checkpoint = torch.load(os.path.join(save_path, checkpoints[-1]), map_location=device, weights_only=False)
        frame_idx = checkpoint['frame_idx']
        net.load_state_dict(checkpoint['net'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("加载模型成功")

    with common.RewardTracker(writer, stop_reward=700) as tracker:
        with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
            for step_idx, exp in enumerate(exp_source):
                batch.append(exp)

                # Report newly finished episodes; tracker may signal to stop
                new_rewards = exp_source.pop_total_rewards()
                if new_rewards:
                    if tracker.reward(new_rewards[0], step_idx + frame_idx):
                        break

                # Accumulate experience until a full batch is available
                if len(batch) < BATCH_SIZE:
                    continue


                states_v, actions_t, vals_ref_v = unpack_batch(batch, net, device=device)
                batch.clear()

                optimizer.zero_grad()
                logits_v, value_v = net(states_v)
                # Critic loss: MSE between predicted values and n-step returns
                loss_value_v = F.mse_loss(value_v.squeeze(-1), vals_ref_v)

                log_prob_v = F.log_softmax(logits_v, dim=1)
                # Advantage uses detached values so the policy loss does not
                # backprop through the critic head
                adv_v = vals_ref_v - value_v.squeeze(-1).detach()
                log_prob_actions_v = adv_v * log_prob_v[range(BATCH_SIZE), actions_t]
                loss_policy_v = -log_prob_actions_v.mean()

                prob_v = F.softmax(logits_v, dim=1)
                # Entropy bonus (scaled negative entropy) to keep exploring
                entropy_loss_v = ENTROPY_BETA * (prob_v * log_prob_v).sum(dim=1).mean()

                # Backprop the policy loss first (retain_graph so the second
                # backward can reuse the graph) to snapshot policy gradients
                # for the gradient statistics logged below
                loss_policy_v.backward(retain_graph=True)
                grads = np.concatenate([p.grad.data.cpu().numpy().flatten()
                                        for p in net.parameters()
                                        if p.grad is not None])

                # Add entropy + value gradients on top, then clip and step
                loss_v = entropy_loss_v + loss_value_v
                loss_v.backward()
                nn_utils.clip_grad_norm_(net.parameters(), CLIP_GRAD)
                optimizer.step()
                # Total loss is assembled after the step for logging only
                loss_v += loss_policy_v
                frame_idx += 1
                scheduler.step()

                if frame_idx % 200 == 0:
                    # Test the model
                    test_reward = test_model(test_env, net, device=device, episodes=2)
                    print(f"Test reward: {test_reward:.2f}")
                    common.save_best_model(test_reward, net.state_dict(), save_path, "a2c-best", keep_best=10)

                if frame_idx % SAVE_ITERS == 0:
                    # Periodic checkpoint with optimizer state for resuming
                    checkpoint = {
                        "net": net.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "frame_idx": frame_idx,
                    }
                    common.save_checkpoints(frame_idx, checkpoint, save_path, "a2c", keep_last=5)


                tb_tracker.track("advantage",       adv_v, step_idx + frame_idx)
                tb_tracker.track("values",          value_v, step_idx + frame_idx)
                tb_tracker.track("batch_rewards",   vals_ref_v, step_idx + frame_idx)
                tb_tracker.track("loss_entropy",    entropy_loss_v, step_idx + frame_idx)
                tb_tracker.track("loss_policy",     loss_policy_v, step_idx + frame_idx)
                tb_tracker.track("loss_value",      loss_value_v, step_idx + frame_idx)
                tb_tracker.track("loss_total",      loss_v, step_idx + frame_idx)
                tb_tracker.track("grad_l2",         np.sqrt(np.mean(np.square(grads))), step_idx + frame_idx)
                tb_tracker.track("grad_max",        np.max(np.abs(grads)), step_idx + frame_idx)
                tb_tracker.track("grad_var",        np.var(grads), step_idx + frame_idx)