import random

import gym
import numpy as np
import time
import matplotlib.pyplot as plt
import torch
import os
from collections import deque, Counter
import copy
from collections import Counter
from matplotlib import animation
from PIL import Image


def preprocess(image):
    """Preprocess a 210x160x3 uint8 Atari frame into a flat 6400 (80x80) float vector.

    Args:
        image: raw RGB frame from the Pong environment, shape (210, 160, 3).

    Returns:
        np.ndarray of shape (6400,), dtype float64: 1.0 for foreground pixels
        (paddles/ball), 0.0 for background.
    """
    image = image[35:195]  # crop to the playing field
    image = image[::2, ::2, 0]  # downsample by 2x, keep a single channel
    # The slices above are views; copy before writing so the caller's frame
    # is not mutated (matches the explicit .copy() in show_image's togray).
    image = image.copy()
    image[image == 144] = 0  # erase background (background type 1)
    image[image == 109] = 0  # erase background (background type 2)
    image[image != 0] = 1  # everything non-background becomes white (1)
    # np.float was removed in NumPy 1.24; the builtin float (float64) is the
    # exact equivalent of the old alias.
    return image.astype(float).ravel()  # flatten to (6400,)


def show_image(status):
    """Visualize the preprocessing pipeline for a single raw frame.

    Prints the pixel-value composition of the downsampled frame, then shows
    the raw frame and every intermediate stage side by side.
    """
    cropped = status[35:195]  # crop to the playing field
    downsampled = cropped[::2, ::2, 0]  # downsample 2x, single channel

    # Inspect which pixel values the downsampled frame is made of.
    def see_color(status):
        flat = []
        for row_idx in range(80):
            flat.extend(status[row_idx])
        print("像素点构成: ", Counter(flat))

    see_color(downsampled)

    # With the background values known, erase them (works on a copy).
    def togray(image_in):
        img = image_in.copy()
        img[img == 144] = 0  # erase background (background type 1)
        img[img == 109] = 0  # erase background (background type 2)
        img[img != 0] = 1  # everything else becomes white (1)
        return img

    grayed = togray(downsampled)

    # Show all intermediate stages in one row of subplots.
    def show_status(stages):
        plt.figure(figsize=(10, 6), dpi=200)
        plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.3, hspace=0)
        for pos, img in enumerate(stages, start=1):
            plt.subplot(1, len(stages), pos)
            plt.imshow(img, cmap=plt.cm.binary)
        plt.show()

    show_status([status, cropped, downsampled, grayed])


class Model(torch.nn.Module):
    """Three-layer fully connected policy network.

    Args:
        obs_dim (int): dimensionality of the observation space.
        act_dim (int): dimensionality of the action space.
    """

    def __init__(self, obs_dim, act_dim):
        super(Model, self).__init__()
        # Layer widths: obs_dim -> 256 -> 64 -> act_dim.
        # Attribute names fc1/fc2/fc3 are kept: they are the state_dict keys
        # used by the save/load checkpoints.
        self.fc1 = torch.nn.Linear(obs_dim, 256)
        self.fc2 = torch.nn.Linear(256, 64)
        self.fc3 = torch.nn.Linear(64, act_dim)

    def forward(self, obs):
        """Map an observation to a probability distribution over actions."""
        hidden = torch.nn.functional.relu(self.fc1(obs))
        hidden = torch.nn.functional.relu(self.fc2(hidden))
        return torch.nn.functional.softmax(self.fc3(hidden), dim=-1)


# 梯度下降算法
# Policy-gradient (REINFORCE) update rule.
class PolicyGradient():
    def __init__(self, model, lr):
        self.model = model
        self.optimizer = torch.optim.Adam(lr=lr, params=self.model.parameters())

    def predict(self, obs):
        """Return the action distribution produced by the policy network."""
        return self.model(obs)

    def learn(self, obs, action, reward):
        """Run one gradient step on E[-log pi(a|s) * G] and return the loss."""
        dist = torch.distributions.Categorical(self.model(obs))
        log_prob = dist.log_prob(action)
        loss = (-log_prob * reward).mean()

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss


class Agent():
    """Wraps the algorithm with action sampling, greedy prediction and checkpointing."""

    def __init__(self, algorithm):
        self.alg = algorithm

        # Resume from checkpoint when one exists.  The original tested the
        # unrelated "./savemodel" directory while load() actually reads
        # "./model/opt.pt" — check the real checkpoint path instead.
        if os.path.exists("./model/opt.pt"):
            print("开始从文件加载参数....")
            try:
                self.load()
                print("从文件加载参数结束....")
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are not swallowed; a broken checkpoint means train from scratch.
                print("从文件加载参数失败，从0开始训练....")

    def sample(self, obs):
        """Sample an action (with exploration) from the policy given obs.

        Args:
            obs: 1-D float observation vector (np.ndarray).

        Returns:
            int action index drawn from the policy's distribution.
        """
        obs = torch.from_numpy(obs).to(torch.float32)
        prob = self.alg.predict(obs)
        prob = prob.detach().numpy()
        act = np.random.choice(len(prob), 1, p=prob)[0]  # draw according to the action probabilities
        return act

    def predict(self, obs):
        """Return the greedy (argmax-probability) action for obs."""
        obs = torch.from_numpy(obs).to(torch.float32)
        prob = self.alg.predict(obs)
        act = prob.argmax().numpy()  # pick the highest-probability action
        return act

    def learn(self, obs, act, reward):
        """Run one model update from a batch of (obs, act, reward) arrays.

        Returns:
            the scalar loss as a numpy value.
        """
        # Add a trailing axis so act/reward align with batched obs.
        act = np.expand_dims(act, axis=-1)
        reward = np.expand_dims(reward, axis=-1)

        obs = torch.from_numpy(obs).to(torch.float32)
        act = torch.from_numpy(act).to(torch.int)
        reward = torch.from_numpy(reward).to(torch.float32)

        loss = self.alg.learn(obs, act, reward)
        return loss.detach().numpy()

    def save(self):
        """Persist the full model and its state_dict under ./model/."""
        # save() previously failed outright when ./model/ did not exist yet.
        os.makedirs("./model", exist_ok=True)
        torch.save(self.alg.model, './model/PG-Pong_net.pt')
        torch.save(self.alg.model.state_dict(), "./model/opt.pt")

    def load(self):
        """Restore network parameters from ./model/opt.pt."""
        # The original also torch.load-ed the full pickled model into an
        # unused local; only the state_dict is actually needed.
        self.alg.model.load_state_dict(torch.load("./model/opt.pt"))


# Run one training episode and collect the trajectory.
def run_train_episode(agent, env):
    """Play a single episode with exploration; return (obs, action, reward) lists."""
    obs_list, action_list, reward_list = [], [], []
    obs, _ = env.reset()
    done = False
    while not done:
        processed = preprocess(obs)  # from shape (210, 160, 3) to (6400,)
        obs_list.append(processed)

        action = agent.sample(processed)
        action_list.append(action)

        obs, reward, done, _, info = env.step(action)
        reward_list.append(reward)
    return obs_list, action_list, reward_list


# Evaluate the agent: run 5 greedy episodes and average the total reward.
def run_evaluate_episodes(agent, env, render=False):
    """Return the mean episode reward over 5 evaluation episodes (no exploration)."""
    eval_reward = []
    for _ in range(5):
        obs, _ = env.reset()
        episode_reward = 0
        isOver = False
        while not isOver:
            frame = preprocess(obs)  # from shape (210, 160, 3) to (6400,)
            action = agent.predict(frame)
            obs, reward, isOver, _, _ = env.step(action)
            episode_reward += reward
            if render:
                env.render()
        eval_reward.append(episode_reward)
    return np.mean(eval_reward)


def calc_reward_to_go(reward_list, gamma=0.99):
    """Compute normalized discounted returns G_t for one episode.

    Args:
        reward_list: per-step rewards (any numeric sequence).
        gamma: discount factor in [0, 1].

    Returns:
        np.ndarray (float64) of discounted returns, normalized to zero mean
        and unit variance (left at zero mean if the returns are constant).
    """
    # Force a float array: with integer rewards the in-place `+=` would
    # truncate and the in-place `/=` below would raise a TypeError.
    reward_arr = np.array(reward_list, dtype=np.float64)
    for i in range(len(reward_arr) - 2, -1, -1):
        # G_t = r_t + γ·r_{t+1} + ... = r_t + γ·G_{t+1}
        reward_arr[i] += gamma * reward_arr[i + 1]

    # Normalize episode returns; skip the divide when std == 0 (constant
    # returns), which would otherwise fill the array with NaNs.
    reward_arr -= np.mean(reward_arr)
    std = np.std(reward_arr)
    if std > 0:
        reward_arr /= std
    return reward_arr


def save_frames_as_gif(frames, filename):
    """Write a sequence of RGB frames to an animated GIF via matplotlib/pillow."""
    # Figure size is derived from the frame dimensions (pixels/100 at dpi 300);
    # tweak this to change the output frame size.
    height, width = frames[0].shape[0], frames[0].shape[1]
    plt.figure(figsize=(width / 100, height / 100), dpi=300)

    patch = plt.imshow(frames[0])
    plt.axis('off')

    # FuncAnimation callback: swap frame i into the image artist.
    def animate(i):
        patch.set_data(frames[i])

    anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)
    anim.save(filename, writer='pillow', fps=60)




# ---- Demo: load the trained policy and play one episode of Pong ----
model = Model(6400, 6)
model.load_state_dict(torch.load("./model/opt.pt"))

env = gym.make('Pong-v4', render_mode="rgb_array")

state, _ = env.reset()
frames = []
done = False  # was `0`; gym's step returns a bool here
i = 0
reward_list = []
while not done:
    frames.append(env.render())
    obs = preprocess(state)
    obs = torch.Tensor(obs).to(torch.float32)
    prob = model(obs)
    # Greedy action from the trained policy.  (A leftover debug line used to
    # overwrite this with random.choice([0..5]), which made loading the
    # trained model pointless and the "trained" gif filename misleading.)
    action = prob.argmax().numpy()
    next_state, reward, done, _, _ = env.step(action)
    if reward != 0:
        # Pong emits +1/-1 only when a point is scored.
        reward_list.append(reward)
        print(i, "   ", reward, done)
    state = next_state
    i += 1

# Tally points: +1 rewards are ours, -1 rewards are the opponent's.
reward_counter = Counter(reward_list)
print(reward_counter)
print("你的得分为：", reward_counter[1.0], '对手得分为：', reward_counter[-1.0])
if reward_counter[1.0] > reward_counter[-1.0]:
    print("恭喜您赢了！！！")
else:
    print("惜败，惜败，训练一下智能体网络再来挑战吧QWQ")

save_frames_as_gif(frames, filename="Pong-v4_trained.gif")

env.close()
