from IPython import display
from IPython.display import SVG
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as nnf

STATE_NUM = 3  # length of the state-history window fed to the network

def min_loss(x, y):
    """Return the squared error between x and y. (Currently unused helper.)"""
    diff = x - y
    return diff * diff

class Q(nn.Module):
    """Small MLP mapping a state-history vector to Q-values for the two actions.

    Architecture: state_num -> 16 -> 64 -> 2, leaky-ReLU after every layer
    (including the output layer, matching the original network).
    """

    def __init__(self, state_num=STATE_NUM):
        super(Q, self).__init__()
        self.fc1 = nn.Linear(state_num, 16)
        self.fc2 = nn.Linear(16, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        # Apply each layer followed by leaky ReLU in sequence.
        for layer in (self.fc1, self.fc2, self.fc3):
            x = nnf.leaky_relu(layer(x))
        return x

# 智能体
class DQNAgent():
    """DQN agent for the pendulum swing-up task.

    Acts epsilon-greedily over a two-action torque space (-1 / +1) and
    learns from an experience-replay buffer. Episodes whose total reward
    ranks in the top 100 are preferentially replayed into the global
    buffer ("good experience replay").
    """

    def __init__(self, epsilon=0.99):
        self.model = Q()
        self.optimizer = torch.optim.Adam(self.model.parameters(), 0.001)
        self.epsilon = epsilon               # probability of taking a random action
        self.actions = [-1, 1]               # motor-torque choices
        self.experienceMemory = []           # global replay buffer
        self.memSize = 300*100               # replay buffer capacity
        self.experienceMemory_local = []     # per-episode (local) buffer
        self.memPos = 0                      # ring-buffer write index
        self.batch_num = 32                  # mini-batch size for learning
        self.gamma = 0.9                     # discount factor
        self.loss_function = nn.MSELoss()
        self.loss = 0.0                      # last training loss (read by the driver loop)
        # Scores of the 100 best episodes seen so far; an episode beating
        # the current minimum gets its transitions replayed globally.
        self.total_reward_award = np.ones(100)*-1000

    def get_action_value(self, seq):
        """Return the Q-value tensor (one entry per action) for state sequence `seq`."""
        x = torch.tensor(seq.astype(np.float32))
        return self.model(x)

    def get_greedy_action(self, seq):
        """Return the action with the highest predicted Q-value."""
        with torch.no_grad():  # inference only; no gradient needed
            q = self.get_action_value(seq)
        return self.actions[int(torch.argmax(q))]

    def reduce_epsilon(self):
        """Linearly anneal epsilon, clamped at 0 so it never goes negative."""
        self.epsilon = max(self.epsilon - 1.0/100000, 0.0)

    def get_epsilon(self):
        return self.epsilon

    def get_action(self, seq, train):
        """Return the motor torque for state sequence `seq` (theta history).

        During training, explores randomly with probability epsilon;
        otherwise acts greedily w.r.t. the current Q-network.
        """
        if train and np.random.random() < self.epsilon:
            return np.random.choice(self.actions)   # explore
        return self.get_greedy_action(seq)          # exploit

    def add_experience_local(self, old_seq, action, reward, new_seq):
        """Record one transition [old_seq, action, reward, new_seq] for this episode."""
        self.experienceMemory_local.append(np.hstack([old_seq, action, reward, new_seq]))

    def add_experience_global(self, total_reward):
        """Promote this episode's transitions into the global buffer.

        Episodes ranking in the top 100 by total reward are always
        replayed; any episode is also replayed with 1% probability so the
        buffer keeps some ordinary experience.
        """
        if np.min(self.total_reward_award) < total_reward:
            i = np.argmin(self.total_reward_award)
            self.total_reward_award[i] = total_reward
            # GOOD EXPERIENCE REPLAY
            for x in self.experienceMemory_local:
                self.experience(x)

        # Occasionally keep unremarkable episodes too.
        if np.random.random() < 0.01:
            # NORMAL EXPERIENCE REPLAY
            for x in self.experienceMemory_local:
                self.experience(x)

        self.experienceMemory_local = []

    def experience(self, x):
        """Append x to the replay buffer, overwriting the oldest entry once full."""
        # BUG FIX: was `>`, which let the buffer grow one item past memSize.
        if len(self.experienceMemory) >= self.memSize:
            self.experienceMemory[self.memPos % self.memSize] = x
            self.memPos += 1
        else:
            self.experienceMemory.append(x)

    def update_model(self, old_seq, action, reward, new_seq):
        """Run one mini-batch TD update on the Q-network.

        The passed transition is not used directly; a batch is sampled
        uniformly from the replay buffer. Stores the scalar loss in
        `self.loss` for logging.
        """
        # Skip until enough experience has accumulated.
        if len(self.experienceMemory) < self.batch_num:
            return

        # Sample a mini-batch from the replay buffer.
        memsize = len(self.experienceMemory)
        batch_index = np.random.randint(0, memsize, self.batch_num)
        batch = np.array([self.experienceMemory[i] for i in batch_index])
        x = torch.tensor(batch[:, 0:STATE_NUM].astype(np.float32))

        q = self.model(x)             # predictions (gradient-tracked)
        targets = q.detach().clone()  # TD targets start from the predictions

        for i in range(self.batch_num):
            # Row layout: [old_seq..., action, reward, new_seq...]
            a = batch[i, STATE_NUM]
            r = batch[i, STATE_NUM + 1]
            ai = int((a + 1) / 2)  # maps action -1/+1 to index 0/1
            next_seq = batch[i, (STATE_NUM + 2):(STATE_NUM * 2 + 2)]
            # BUG FIX: np.max on a grad-tracking tensor raised at runtime;
            # evaluate the bootstrap value without gradients instead.
            with torch.no_grad():
                max_next = self.get_action_value(next_seq).max().item()
            targets[i, ai] = r + self.gamma * max_next

        # BUG FIX: the loss was computed between the *input states* x and
        # the targets (a shape mismatch), and the target tensor was built
        # with `torch(...)`, which is not callable. Compare predictions
        # against the TD targets instead.
        loss = self.loss_function(q, targets)
        self.loss = loss.item()  # expose for the logging loop

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()


class PendulumEnvironment():
    '''
    Pendulum swing-up environment. The input action is the motor torque
    and the reward is based on the height of the tip of the rod.
    '''

    def __init__(self):
        self.reset(0, 0)

    def reset(self, initial_theta, initial_dtheta):
        """Set the pendulum angle / angular velocity and reset bookkeeping."""
        self.th = initial_theta
        self.th_old = self.th
        self.th_ = initial_dtheta
        self.g = 0.01
        self.highscore = -1.0

    def get_reward(self):
        '''
        Positive reward proportional to 5x the tip height when the tip is
        at or above the pivot; negative reward proportional to the depth
        below the pivot otherwise.
        '''
        height = -np.cos(self.th)
        return 5 * np.abs(height) if height >= 0 else -np.abs(height)

    def get_state(self):
        return self.th

    def update_state(self, action):
        '''
        `action` is the motor torque; only its sign matters:
        positive -> 0.005, zero -> 0, negative -> -0.005.
        '''
        torque = 0.005 * np.sign(action)

        # Simple Euler integration of gravity plus the applied torque.
        self.th_ += -self.g * np.sin(self.th) + torque
        self.th_old = self.th
        self.th += self.th_

# 传递环境和智能体时模拟的模拟器。
# 让这里有顺序感觉很奇怪，可以吗。。
# Simulator that runs episodes, wiring the environment and the agent together.
class simulator:
    def __init__(self, environment, agent):
        self.agent = agent
        self.env = environment

        self.num_seq = STATE_NUM
        self.reset_seq()
        self.learning_rate = 1.0
        self.highscore = 0

    def reset_seq(self):
        """Clear the rolling state-history window."""
        self.seq = np.zeros(self.num_seq)

    def push_seq(self, state):
        """Shift the history right by one and put `state` at the front."""
        self.seq = np.roll(self.seq, 1)
        self.seq[0] = state

    def run(self, train=True):
        """Run one 300-step episode and return the total reward.

        When `train` is True the agent's model is updated afterwards and
        epsilon is annealed.
        """
        self.env.reset(0, 0)
        self.reset_seq()
        total_reward = 0

        for _ in range(300):
            # Snapshot the current state sequence.
            old_seq = self.seq.copy()

            # Let the agent pick an action and feed it to the environment.
            action = self.agent.get_action(old_seq, train)
            self.env.update_state(action)

            reward = self.env.get_reward()
            total_reward += reward

            # Observe the new state and advance the sequence.
            self.push_seq(self.env.get_state())
            new_seq = self.seq.copy()

            # Store the transition in the agent's per-episode memory.
            self.agent.add_experience_local(old_seq, action, reward, new_seq)

        # Episode finished: promote local memory to the global buffer.
        self.agent.add_experience_global(total_reward)

        if train:
            # Update the model from replay memory.
            self.agent.update_model(old_seq, action, reward, new_seq)
            self.agent.reduce_epsilon()

        return total_reward


if __name__ == '__main__':
    # Train the DQN agent on the pendulum environment, periodically
    # evaluating it with a greedy (non-training) episode.
    agent = DQNAgent()
    env = PendulumEnvironment()
    sim = simulator(env, agent)
    test_highscore = 0

    for i in range(30000):
        sim.run(train=True)

        if i % 1000 == 0:
            # Periodic checkpoint hook (model saving not ported from Chainer):
            # torch.save(agent.model.state_dict(), 'model/%06d.model' % i)
            pass

        if i % 10 == 0:
            # Greedy evaluation episode.
            total_reward = sim.run(train=False)
            if test_highscore < total_reward:
                print("highscore!")
                # torch.save(agent.model.state_dict(), 'model/%06d_hs.model' % i)
                test_highscore = total_reward

            # BUG FIX: `agent.loss` does not exist before the first model
            # update, which crashed the original at i=0; fall back to NaN.
            # Also removed the Python-2 `print(x),` trailing commas, which
            # in Python 3 just built throwaway tuples, and the dead `out`
            # string that was formatted but never written anywhere.
            loss = getattr(agent, 'loss', float('nan'))
            aw = agent.total_reward_award
            print(i, total_reward)
            print("epsilon:%2.2e" % agent.get_epsilon())
            print("loss:%2.2e" % loss)
            print("min:%d,max:%d" % (np.min(aw), np.max(aw)))