#coding=UTF-8
# Entry-point file; uses absolute imports (top of the package hierarchy)
import os
import time
from typing import cast

import cv2
import gym
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import torch
from numpy.core.numeric import flatnonzero
from PIL import Image
from tqdm import trange

# Imports from lower-level project packages.
# TODO: some settings in config.py should be integrated into the arg loader.
import config as configpy
import Dagger
import model.args as modelargs
import utils
from arguments import get_args_parser
from Dagger import (ACTION_TO_CLS_DICT, CLS_TO_ACTION_DICT, DUPLICATE_LAST,
                    TERMINATE, DaggerAgent, ExampleAgent, MyAgent)
from model.data_helper import ImageFactory, IterableImageDataset
from model.resnet import ModifiedResNet
from model.trainer import Trainer, tensor_to_PIL

args = None


def plot(record, num_examples):
    """Plot average episode score (left axis) and expert-query count
    (right axis) against environment steps, and save the figure to the
    task directory.

    Args:
        record: dict with parallel lists under the keys 'steps', 'mean',
            'min', 'max' and 'query' (see the record built in main()).
        num_examples: number of labelled examples so far; embedded in the
            output filename.
    """
    # BUGFIX: the original called plt.figure() right before plt.subplots(),
    # creating an extra figure that was never drawn on nor closed.
    fig, ax = plt.subplots()
    ax.plot(record['steps'], record['mean'], color='blue', label='reward')
    # shade the min/max envelope around the mean reward
    ax.fill_between(record['steps'],
                    record['min'],
                    record['max'],
                    color='blue',
                    alpha=0.2)
    ax.set_xlabel('number of steps')
    ax.set_ylabel('Average score per episode')
    # second y-axis sharing x for the expert-query count
    ax1 = ax.twinx()
    ax1.plot(record['steps'], record['query'], color='red', label='query')
    ax1.set_ylabel('queries')
    # proxy artists for a combined legend across both axes
    reward_patch = mpatches.Patch(lw=1,
                                  linestyle='-',
                                  color='blue',
                                  label='score')
    query_patch = mpatches.Patch(lw=1,
                                 linestyle='-',
                                 color='red',
                                 label='query')
    ax.legend(handles=[reward_patch, query_patch])
    fig.savefig(configpy.TASK_DIR +
                'performance_over_{}.png'.format(num_examples))
    # BUGFIX: close the figure; plot() is called repeatedly from the
    # training loop and open figures accumulate memory otherwise.
    plt.close(fig)


# the wrap is mainly for speed up the game
# the agent will act every num_stacks frames instead of one frame
# The wrapper mainly speeds the game up: the agent acts once every
# num_stacks frames instead of on every single frame (frame skipping).
class Env(object):
    """Frame-skipping wrapper around a gym environment."""

    def __init__(self, env_name, num_stacks):
        self.env = gym.make(env_name)
        # num_stacks: the agent acts every num_stacks frames;
        # it could be any positive integer
        self.num_stacks = num_stacks
        self.observation_space = self.env.observation_space
        self.action_space = self.env.action_space

    def step(self, action):
        """Repeat `action` for up to `num_stacks` frames, summing rewards.

        If the episode ends mid-skip, the underlying env is reset
        immediately, but the terminal transition is still returned.
        """
        total_reward = 0
        for _ in range(self.num_stacks):
            obs_next, reward, done, info = self.env.step(action)
            total_reward += reward
            if done:
                self.env.reset()
                break
        return obs_next, total_reward, done, info

    def reset(self):
        """Reset the wrapped environment and return its initial observation."""
        return self.env.reset()


def main():
    """Run the DAgger imitation-learning loop.

    Reads configuration from the module-global ``args`` (set in the
    ``__main__`` guard) and the ``config`` module.  Each outer iteration
    collects one trajectory of expert-labelled frames, retrains the agent
    on every label gathered so far, and periodically evaluates the agent
    in the environment, updating the learning-curve plot.
    """
    trainer = Trainer(configs=args)
    modified_ResNet = ModifiedResNet()
    # number of outer DAgger iterations: one trajectory of args.num_steps each
    num_updates = int(args.num_frames // args.num_steps)
    start = time.time()
    # learning-curve bookkeeping, consumed by plot()
    record = {'steps': [0], 'max': [0], 'mean': [0], 'min': [0], 'query': [0]}
    # query_cnt counts queries to the expert
    query_cnt = 0

    # environment initial
    envs = Env(args.env_name, args.num_stacks)
    # action_shape is the size of the discrete action set, here is 18
    # Most of the 18 actions are useless, find important actions
    # in the tips of the homework introduction document
    action_shape = envs.action_space.n
    # observation_shape is the shape of the observation
    # here is (210,160,3)=(height, weight, channels)
    observation_shape = envs.observation_space.shape
    print(action_shape, observation_shape)

    # agent initial
    # you should finish your agent with DaggerAgent
    # e.g. agent = MyDaggerAgent()
    agent = MyAgent(modified_ResNet, args.CLASS_TXT_PATH)

    # Interactive mode: a human plays the game via Dagger.get_action().
    # NOTE(review): this loop never exits on its own; intentional?
    if args.play_game:
        obs = envs.reset()
        while True:
            # dump the current frame so the player can see the screen
            img = Image.fromarray(obs)
            img.save(configpy.TASK_DIR + "imgs/" + str('screen') + '.jpeg')
            agent_action = Dagger.get_action()

            # re-prompt until the action index is valid for this env
            while agent_action < 0 or agent_action >= action_shape:
                agent_action = int(input('re-input action'))
            obs_next, reward, done, _ = envs.step(agent_action)
            obs = obs_next
            if done:
                print("******DONE******")
                obs = envs.reset()

    img_path = None
    all_labels_txt_path = os.path.abspath(configpy.LABEL_PATH)
    total_num_steps = 0

    for i_traj in range(num_updates):
        # collected but currently unused ("temporarily unused" in the
        # original); labels are persisted via utils.label_file_append instead
        data_this_round = {'data': [], 'label': []}
        obs = envs.reset()
        last_img_path = None
        last_expert_action = None
        for step in range(args.num_steps):
            # current observation rendered as an image for saving/labelling
            img = Image.fromarray(obs)
            # (removed: commented-out image/tensor round-trip debug code)

            if configpy.ENABLE_SAVE_IMG:
                if i_traj == 0:
                    # save the expert's strategy
                    expert_init_data_path = configpy.TASK_DIR + "expert/" + str(
                        step) + ".jpg"
                    utils.path_check(expert_init_data_path)
                    img.save(expert_init_data_path)
                img_rel_path = "imgs/traj_{0}/".format(i_traj) + str(
                    step) + '.jpg'
                img_path = configpy.TASK_DIR + img_rel_path
                utils.path_check(img_path)
                img.save(img_path)
                img.save(configpy.IMG_DISPLAY_PATH)

            # Sample action: query the (human) expert for a label
            expert_action, action_dup_num = Dagger.get_action()
            print("Expert chose to {0}, 原始输出 {1}".format(
                ACTION_TO_CLS_DICT.get(expert_action), expert_action))
            while expert_action == Dagger.DUPLICATE_LAST:
                # repeat the previous frame's label action_dup_num times
                for _ in range(action_dup_num):
                    utils.label_file_append(last_img_path, last_expert_action)
                expert_action, action_dup_num = Dagger.get_action()

            if expert_action != Dagger.TERMINATE and expert_action != Dagger.DUPLICATE_LAST:
                # a real, executable action was labelled
                agent_action = None
                # give the label
                if i_traj == 0:
                    # first trajectory: always execute the expert's action
                    agent_action = expert_action
                else:
                    # DAgger mixing: follow the expert with probability
                    # epsilon, decayed geometrically per trajectory
                    epsilon = configpy.EXPERT_COEF**i_traj
                    if np.random.rand() < epsilon:
                        agent_action = expert_action
                    else:
                        # let the agent choose
                        # Agent can choose to follow the expert to eliminate the useless states
                        if args.enable_save_grad_cam == True:
                            pass
                        agent_action = agent.select_action(obs)
                    print("Agent chose to {0}, 原始输出 {1}".format(
                        ACTION_TO_CLS_DICT.get(agent_action), agent_action))

                # persist the expert label (possibly duplicated)
                for _ in range(action_dup_num):
                    utils.label_file_append(img_rel_path, expert_action)

                last_img_path = img_rel_path
                last_expert_action = expert_action

                data_this_round['data'].append(obs)
                data_this_round['label'].append(expert_action)

                # the env executes the MIXED action while the expert's label
                # is what gets stored — this is the core of DAgger
                obs_next, reward, done, _ = envs.step(agent_action)
                obs = obs_next
                total_num_steps += 1

                if done:
                    envs.reset()
                    break

            # NOTE: on the special labels (TERMINATE) the env is NOT stepped
            elif expert_action == Dagger.TERMINATE:
                break

        # retrain on all labels gathered so far; returns dataset size
        num_examples = agent.update(
            imgs_labels_txt_filepath=all_labels_txt_path,
            trainer=trainer,
            increment_trainning=args.increment_trainning)

        # Evaluate
        if (i_traj + 1) % args.log_interval == 0:
            obs = envs.reset()
            reward_episode_set = []
            reward_episode = 0
            # evaluate your model by testing in the environment
            for step in trange(args.test_steps):
                agent_action = agent.select_action(obs, enable_print=False)
                obs_next, reward, done, _ = envs.step(agent_action)
                reward_episode += reward
                obs = obs_next
                if done:
                    reward_episode_set.append(reward_episode)
                    reward_episode = 0
                    envs.reset()

            # no episode finished within test_steps: record the partial score
            if len(reward_episode_set) == 0:
                reward_episode_set.append(reward_episode)

            _mean = np.mean(
                reward_episode_set) if len(reward_episode_set) != 0 else 0
            _min = np.min(
                reward_episode_set) if len(reward_episode_set) != 0 else 0
            _max = np.max(
                reward_episode_set) if len(reward_episode_set) != 0 else 0
            end = time.time()
            print(
                "TIME {} Updates {}, num timesteps {}, FPS {} \n query {}, avrage/min/max reward {:.1f}/{:.1f}/{:.1f}"
                .format(
                    time.strftime("%Hh %Mm %Ss",
                                  time.gmtime(time.time() - start)),
                    i_traj, total_num_steps,
                    int(total_num_steps / (end - start)), query_cnt, _mean,
                    _min, _max))
            record['steps'].append(total_num_steps)
            record['mean'].append(_mean)
            record['max'].append(_max)
            record['min'].append(_min)
            record['query'].append(query_cnt)
            plot(record, num_examples=num_examples)


def test(batch_size=32):
    """Evaluate a saved ModifiedResNet checkpoint on the labelled dataset.

    Loads a hard-coded checkpoint, runs it over the evaluation dataloader
    and prints per-batch predicted vs. expert actions plus accuracy.

    Args:
        batch_size: mini-batch size for the evaluation dataloader.
    """
    # compute device once; BUGFIX: the original moved only `labels` to the
    # device while the model and inputs stayed on CPU, which crashes the
    # `output_argmaxed == labels` comparison on CUDA machines.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    model = ModifiedResNet()
    # BUGFIX: was named `dict`, shadowing the builtin; `map_location` lets a
    # GPU-saved checkpoint load on CPU-only hosts.
    checkpoint = torch.load(
        "/home/zhaoxiaoyu/courses/rl/assignments/RL_HW1_Dagger/Dagger/model/trainning_tasks/1019_1644_COEF_0.8_NOINC_/model.pt",
        map_location=device)
    model.load_state_dict(state_dict=checkpoint['model_state_dict'])
    model.to(device)
    model.eval()
    print(model)

    test_dataloader, _, _, _ = ImageFactory(
        cls_txt_path=
        "/home/zhaoxiaoyu/courses/rl/assignments/RL_HW1_Dagger/Dagger/model/config/classes.txt"
    ).get_iterable_image_dataloaders(
        imgs_and_labels_txt_path=
        "/home/zhaoxiaoyu/courses/rl/assignments/RL_HW1_Dagger/Dagger/model/play_task/1019_1644_COEF_0.8_NOINC_/label.txt",
        batch_size=batch_size)

    # inference only: disable autograd bookkeeping
    with torch.no_grad():
        for batch_idx, (inputs, labels) in enumerate(test_dataloader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)

            output_argmaxed = outputs.argmax(dim=1)
            correct = (output_argmaxed == labels).float().sum()
            acc = correct / inputs.size(0)

            # map predicted class indices -> class ids -> action names
            output_classes = [
                int(Dagger.IDX_TO_CLS_DICT.get(pred.item()))
                for pred in output_argmaxed
            ]
            output_actions = [
                Dagger.ACTION_TO_CLS_DICT.get(cls) for cls in output_classes
            ]
            # same mapping for the ground-truth labels
            labels_classes = [
                int(Dagger.IDX_TO_CLS_DICT.get(lab.item())) for lab in labels
            ]
            labels_actions = [
                Dagger.ACTION_TO_CLS_DICT.get(cls) for cls in labels_classes
            ]

            print("output", output_actions)
            print("labels", labels_actions)

            print("iter : {0}, acc : {1:.2f}".format(batch_idx, acc.item()))


if __name__ == "__main__":
    # Build the run configuration: CLI arguments first, then YAML overrides.
    parsed_args = get_args_parser()
    args = modelargs.parse_args_and_yaml(parsed_args)

    # Name the task directory after the experiment hyper-parameters.
    configpy.EXPERT_COEF = 0.7
    coef_tag = "{:.2f}".format(configpy.EXPERT_COEF)
    configpy.TASK_SUFFIX = ("_COEF_" + coef_tag + "_ResNet_NOBN_INC_" +
                            str(args.increment_trainning) + "_SINGLE")
    configpy.update_taskname()

    main()
